// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
#include <linux/error-injection.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;
#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif
static struct class mtd_class = {
	.name = "mtd",
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	idr_remove(&mtd_idr, mtd->index);
	of_node_put(mtd_get_of_node(mtd));

	if (mtd_is_partition(mtd))
		release_mtd_partition(mtd);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
	struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
	bool is_partition = mtd_is_partition(mtd);

	debugfs_remove_recursive(mtd->dbg.dfs_dir);

	/* Try to remove the NVMEM provider */
	nvmem_unregister(mtd->nvmem);

	device_unregister(&mtd->dev);

	/*
	 * Clear dev so mtd can be safely re-registered later if desired.
	 * Should not be done for partitions, as they were already
	 * destroyed in device_unregister().
	 */
	if (!is_partition)
		memset(&mtd->dev, 0, sizeof(mtd->dev));

	module_put(THIS_MODULE);
}
#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);
static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);
static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);
static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_oobavail.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};
static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
	const char *mtd_expert_analysis_warning =
		"Bad block checks have been entirely disabled.\n"
		"This is only reserved for post-mortem forensics and debug purposes.\n"
		"Never enable this mode if you do not know what you are doing!\n";

	return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
	struct device *dev = &mtd->dev;

	if (IS_ERR_OR_NULL(dfs_dir_mtd))
		return;

	mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}
#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif
static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}
/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 *
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to the wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
			      struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

	if (wunit < 0 || wunit >= npairs)
		return -EINVAL;

	if (master->pairing && master->pairing->get_info)
		return master->pairing->get_info(master, wunit, info);

	info->group = 0;
	info->pair = wunit;

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * documentation for an example).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
			      const struct mtd_pairing_info *info)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;

	if (!info || info->pair < 0 || info->pair >= npairs ||
	    info->group < 0 || info->group >= ngroups)
		return -EINVAL;

	if (master->pairing && master->pairing->get_wunit)
		return mtd->pairing->get_wunit(master, info);

	return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->pairing || !master->pairing->ngroups)
		return 1;

	return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
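/*
 * Usage sketch (illustrative only, not from the original file): listing every
 * wunit paired with a given 'wunit' by combining the pairing helpers above.
 * 'mtd' and 'wunit' are assumed to come from the caller's context.
 *
 *	struct mtd_pairing_info info;
 *	int i, other;
 *
 *	mtd_wunit_to_pairing_info(mtd, wunit, &info);
 *	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *		info.group = i;
 *		other = mtd_pairing_info_to_wunit(mtd, &info);
 *		pr_info("wunit %d is paired with wunit %d\n", wunit, other);
 *	}
 */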
static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
			      void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int err;

	err = mtd_read(mtd, offset, bytes, &retlen, val);
	if (err && err != -EUCLEAN)
		return err;

	return retlen == bytes ? 0 : -EIO;
}
static int mtd_nvmem_add(struct mtd_info *mtd)
{
	struct device_node *node = mtd_get_of_node(mtd);
	struct nvmem_config config = {};

	config.id = NVMEM_DEVID_NONE;
	config.dev = &mtd->dev;
	config.name = dev_name(&mtd->dev);
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
	config.reg_read = mtd_nvmem_reg_read;
	config.size = mtd->size;
	config.word_size = 1;
	config.stride = 1;
	config.read_only = true;
	config.root_only = true;
	config.ignore_wp = true;
	config.priv = mtd;

	mtd->nvmem = nvmem_register(&config);
	if (IS_ERR(mtd->nvmem)) {
		/* Just ignore if there is no NVMEM support in the kernel */
		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
			mtd->nvmem = NULL;
		else
			return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
					     "Failed to register NVMEM device\n");
	}

	return 0;
}
static void mtd_check_of_node(struct mtd_info *mtd)
{
	struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
	const char *pname, *prefix = "partition-";
	int plen, mtd_name_len, offset, prefix_len;

	/* Check if MTD already has a device node */
	if (mtd_get_of_node(mtd))
		return;

	if (!mtd_is_partition(mtd))
		return;

	parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
	if (!parent_dn)
		return;

	if (mtd_is_partition(mtd->parent))
		partitions = of_node_get(parent_dn);
	else
		partitions = of_get_child_by_name(parent_dn, "partitions");
	if (!partitions)
		goto exit_parent;

	prefix_len = strlen(prefix);
	mtd_name_len = strlen(mtd->name);

	/* Search if a partition is defined with the same name */
	for_each_child_of_node(partitions, mtd_dn) {
		/* Skip partition with no/wrong prefix */
		if (!of_node_name_prefix(mtd_dn, prefix))
			continue;

		/* Labels have priority. Check that first */
		if (!of_property_read_string(mtd_dn, "label", &pname)) {
			offset = 0;
		} else {
			pname = mtd_dn->name;
			offset = prefix_len;
		}

		plen = strlen(pname) - offset;
		if (plen == mtd_name_len &&
		    !strncmp(mtd->name, pname + offset, plen)) {
			mtd_set_of_node(mtd, mtd_dn);
			of_node_put(mtd_dn);
			break;
		}
	}

	of_node_put(partitions);
exit_parent:
	of_node_put(parent_dn);
}
/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or non-zero on failure.
 */
int add_mtd_device(struct mtd_info *mtd)
{
	struct device_node *np = mtd_get_of_node(mtd);
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_notifier *not;
	int i, error, ofidx;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
		return -EEXIST;

	BUG_ON(mtd->writesize == 0);

	/*
	 * MTD drivers should implement ->_{write,read}() or
	 * ->_{write,read}_oob(), but not both.
	 */
	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
		    (mtd->_read && mtd->_read_oob)))
		return -EINVAL;

	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
		    !(mtd->flags & MTD_NO_ERASE)))
		return -EINVAL;

	/*
	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
	 * master is an MLC NAND and has a proper pairing scheme defined.
	 * We also reject masters that implement ->_writev() for now, because
	 * NAND controller drivers don't implement this hook, and adding the
	 * SLC -> MLC address/length conversion to this path is useless if we
	 * don't have a user.
	 */
	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
	     !master->pairing || master->_writev))
		return -EINVAL;

	mutex_lock(&mtd_table_mutex);

	ofidx = -1;
	if (np)
		ofidx = of_alias_get_id(np, "mtd");
	if (ofidx >= 0)
		i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
	else
		i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	kref_init(&mtd->refcnt);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		int ngroups = mtd_pairing_groups(master);

		mtd->erasesize /= ngroups;
		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
			    mtd->erasesize;
	}

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	mtd_check_of_node(mtd);
	of_node_get(mtd_get_of_node(mtd));
	error = device_register(&mtd->dev);
	if (error) {
		put_device(&mtd->dev);
		goto fail_added;
	}

	/* Add the nvmem provider */
	error = mtd_nvmem_add(mtd);
	if (error)
		goto fail_nvmem_add;

	mtd_debugfs_populate(mtd);

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);

	if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
		if (IS_BUILTIN(CONFIG_MTD)) {
			pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
			ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
		} else {
			pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
				mtd->index, mtd->name);
		}
	}

	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_nvmem_add:
	device_unregister(&mtd->dev);
fail_added:
	of_node_put(mtd_get_of_node(mtd));
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}
/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success or a negative error code on failure, which
 *	currently will happen if the requested device does not appear to
 *	be present in the list.
 */
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	kref_put(&mtd->refcnt, mtd_device_release);
	ret = 0;

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;

		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}

	INIT_LIST_HEAD(&mtd->partitions);
	mutex_init(&mtd->master.partitions_lock);
	mutex_init(&mtd->master.chrdev_lock);
}
static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
	struct otp_info *info;
	ssize_t size = 0;
	unsigned int i;
	size_t retlen;
	int ret;

	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (is_user)
		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
	else
		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
	if (ret)
		goto err;

	for (i = 0; i < retlen / sizeof(*info); i++)
		size += info[i].length;

	kfree(info);
	return size;

err:
	kfree(info);

	/* ENODATA means there is no OTP region. */
	return ret == -ENODATA ? 0 : ret;
}
static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
						   const char *compatible,
						   int size,
						   nvmem_reg_read_t reg_read)
{
	struct nvmem_device *nvmem = NULL;
	struct nvmem_config config = {};
	struct device_node *np;

	/* DT binding is optional */
	np = of_get_compatible_child(mtd->dev.of_node, compatible);

	/* OTP nvmem will be registered on the physical device */
	config.dev = mtd->dev.parent;
	config.name = compatible;
	config.id = NVMEM_DEVID_AUTO;
	config.owner = THIS_MODULE;
	config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
	config.type = NVMEM_TYPE_OTP;
	config.root_only = true;
	config.ignore_wp = true;
	config.reg_read = reg_read;
	config.size = size;
	config.of_node = np;

	nvmem = nvmem_register(&config);
	/* Just ignore if there is no NVMEM support in the kernel */
	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
		nvmem = NULL;

	of_node_put(np);

	return nvmem;
}
static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
				       void *val, size_t bytes)
{
	struct mtd_info *mtd = priv;
	size_t retlen;
	int ret;

	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
	if (ret)
		return ret;

	return retlen == bytes ? 0 : -EIO;
}
static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
	struct device *dev = mtd->dev.parent;
	struct nvmem_device *nvmem;
	ssize_t size;
	int err;

	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
		size = mtd_otp_size(mtd, true);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
						       mtd_nvmem_user_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_user_nvmem = nvmem;
		}
	}

	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
		size = mtd_otp_size(mtd, false);
		if (size < 0) {
			err = size;
			goto err;
		}

		if (size > 0) {
			/*
			 * The factory OTP contains things such as a unique
			 * serial number and is small, so let's read it out
			 * and put it into the entropy pool.
			 */
			void *otp;

			otp = kmalloc(size, GFP_KERNEL);
			if (!otp) {
				err = -ENOMEM;
				goto err;
			}
			err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
			if (err < 0) {
				kfree(otp);
				goto err;
			}
			add_device_randomness(otp, err);
			kfree(otp);

			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
						       mtd_nvmem_fact_otp_reg_read);
			if (IS_ERR(nvmem)) {
				err = PTR_ERR(nvmem);
				goto err;
			}
			mtd->otp_factory_nvmem = nvmem;
		}
	}

	return 0;

err:
	nvmem_unregister(mtd->otp_user_nvmem);
	/* Don't report error if OTP is not supported. */
	if (err == -EOPNOTSUPP)
		return 0;

	return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;

	mtd_set_dev_defaults(mtd);

	ret = mtd_otp_nvmem_add(mtd);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			goto out;
	}

	/* Prefer parsed partitions over driver-provided fallback */
	ret = parse_mtd_partitions(mtd, types, parser_data);
	if (ret == -EPROBE_DEFER)
		goto out;

	if (ret > 0)
		ret = 0;
	else if (nr_parts)
		ret = add_mtd_partitions(mtd, parts, nr_parts);
	else if (!device_is_registered(&mtd->dev))
		ret = add_mtd_device(mtd);
	else
		ret = 0;

	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	if (ret) {
		nvmem_unregister(mtd->otp_user_nvmem);
		nvmem_unregister(mtd->otp_factory_nvmem);
	}

	if (ret && device_is_registered(&mtd->dev))
		del_mtd_device(mtd);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
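/*
 * Usage sketch (illustrative only): the typical driver-side pattern is to
 * fill a struct mtd_info in probe() and hand it to
 * mtd_device_parse_register(). All 'my_flash_*' names and the geometry
 * values below are hypothetical.
 *
 *	static int my_flash_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = &my_flash->mtd;
 *
 *		mtd->dev.parent = &pdev->dev;
 *		mtd->name = "my-flash";
 *		mtd->type = MTD_NORFLASH;
 *		mtd->flags = MTD_CAP_NORFLASH;
 *		mtd->size = SZ_1M;
 *		mtd->erasesize = SZ_64K;
 *		mtd->writesize = 1;
 *		mtd->_erase = my_flash_erase;
 *		mtd->_read = my_flash_read;
 *		mtd->_write = my_flash_write;
 *
 *		return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 *	}
 */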
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot) {
		unregister_reboot_notifier(&master->reboot_notifier);
		memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
	}

	nvmem_unregister(master->otp_user_nvmem);
	nvmem_unregister(master->otp_factory_nvmem);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
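/*
 * Usage sketch (illustrative only): an MTD 'user' supplies add/remove
 * callbacks through struct mtd_notifier; the 'my_*' names are hypothetical.
 *
 *	static void my_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d is going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 */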
/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any. Given an address and num == -1, search the device table
 *	for a device with that address and return if it's still present. Given
 *	both, return the num'th driver only if its address matches. Return
 *	error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = other;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int err;

	if (master->_get_device) {
		err = master->_get_device(mtd);
		if (err)
			return err;
	}

	if (!try_module_get(master->owner)) {
		if (master->_put_device)
			master->_put_device(master);
		return -ENODEV;
	}

	while (mtd) {
		if (mtd != master)
			kref_get(&mtd->refcnt);
		mtd = mtd->parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_get(&master->refcnt);

	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
	struct mtd_info *mtd = NULL;
	struct mtd_info *tmp;
	int err;

	mutex_lock(&mtd_table_mutex);

	err = -EPROBE_DEFER;
	mtd_for_each_device(tmp) {
		if (mtd_get_of_node(tmp) == np) {
			mtd = tmp;
			err = __get_mtd_device(mtd);
			break;
		}
	}

	mutex_unlock(&mtd_table_mutex);

	return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns MTD device description structure in case of
 *	success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
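/*
 * Usage sketch (illustrative only): look a device up by name, check the
 * result, and drop the reference once done. The device name is hypothetical
 * and do_something_with() stands in for the caller's own I/O.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("spi0.0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	do_something_with(mtd);
 *	put_mtd_device(mtd);
 */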
void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);

	while (mtd != master) {
		/* kref_put() can release mtd, so keep a reference to mtd->parent */
		struct mtd_info *parent = mtd->parent;

		kref_put(&mtd->refcnt, mtd_device_release);
		mtd = parent;
	}

	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
		kref_put(&master->refcnt, mtd_device_release);

	module_put(master->owner);

	/* must be the last as master can be freed in the _put_device */
	if (master->_put_device)
		master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_info *master = mtd_get_master(mtd);
	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
	struct erase_info adjinstr;
	int ret;

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	adjinstr = *instr;

	if (!mtd->erasesize || !master->_erase)
		return -ENOTSUPP;

	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!instr->len)
		return 0;

	ledtrig_mtd_activity();

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
				master->erasesize;
		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
				master->erasesize) -
			       adjinstr.addr;
	}

	adjinstr.addr += mst_ofs;

	ret = master->_erase(master, &adjinstr);

	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
							 master);
			instr->fail_addr *= mtd->erasesize;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
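/*
 * Usage sketch (illustrative only): erasing the first eraseblock of an
 * already-acquired, writeable 'mtd'. On failure, ei.fail_addr (when not
 * MTD_FAIL_ADDR_UNKNOWN) points at the offending offset.
 *
 *	struct erase_info ei = { };
 *	int err;
 *
 *	ei.addr = 0;
 *	ei.len = mtd->erasesize;
 *	err = mtd_erase(mtd, &ei);
 *	if (err)
 *		pr_err("erase failed: %d (fail_addr %llx)\n", err,
 *		       (unsigned long long)ei.fail_addr);
 */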
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!master->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	from = mtd_get_master_ofs(mtd, from);
	return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
	if (ret)
		return ret;
	if (retlen != len) {
		mtd_unpoint(mtd, offset, retlen);
		return -ENOSYS;
	}
	return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
				 const struct mtd_ecc_stats *old_stats)
{
	struct mtd_ecc_stats diff;

	if (master == mtd)
		return;

	diff = master->ecc_stats;
	diff.failed -= old_stats->failed;
	diff.corrected -= old_stats->corrected;

	while (mtd->parent) {
		mtd->ecc_stats.failed += diff.failed;
		mtd->ecc_stats.corrected += diff.corrected;
		mtd = mtd->parent;
	}
}
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;

	WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
ALLOW_ERROR_INJECTION(mtd_read, ERRNO);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = (u8 *)buf,
	};
	int ret;

	ret = mtd_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
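/*
 * Usage sketch (illustrative only): a read following a write, with the
 * -EUCLEAN convention handled the way mtd_read_oob() documents it below:
 * the data was corrected, but bitflips reached the reporting threshold.
 *
 *	u8 buf[128];
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_write(mtd, 0, sizeof(buf), &retlen, buf);
 *	if (!err)
 *		err = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
 *	if (mtd_is_bitflip(err))
 *		err = 0;
 */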
/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	if (!master->oops_panic_write)
		master->oops_panic_write = true;

	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
				    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}
static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	from = mtd_get_master_ofs(mtd, from);
	if (master->_read_oob)
		ret = master->_read_oob(master, from, ops);
	else
		ret = master->_read(master, from, ops->len, &ops->retlen,
				    ops->datbuf);

	return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	to = mtd_get_master_ofs(mtd, to);
	if (master->_write_oob)
		ret = master->_write_oob(master, to, ops);
	else
		ret = master->_write(master, to, ops->len, &ops->retlen,
				     ops->datbuf);

	return ret;
}
static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
			       struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ngroups = mtd_pairing_groups(master);
	int npairs = mtd_wunit_per_eb(master) / ngroups;
	struct mtd_oob_ops adjops = *ops;
	unsigned int wunit, oobavail;
	struct mtd_pairing_info info;
	int max_bitflips = 0;
	u32 ebofs, pageofs;
	loff_t base, pos;

	ebofs = mtd_mod_by_eb(start, mtd);
	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
	info.group = 0;
	info.pair = mtd_div_by_ws(ebofs, mtd);
	pageofs = mtd_mod_by_ws(ebofs, mtd);
	oobavail = mtd_oobavail(mtd, ops);

	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int ret;

		if (info.pair >= npairs) {
			info.pair = 0;
			base += master->erasesize;
		}

		wunit = mtd_pairing_info_to_wunit(master, &info);
		pos = mtd_wunit_to_offset(mtd, base, wunit);

		adjops.len = ops->len - ops->retlen;
		if (adjops.len > mtd->writesize - pageofs)
			adjops.len = mtd->writesize - pageofs;

		adjops.ooblen = ops->ooblen - ops->oobretlen;
		if (adjops.ooblen > oobavail - adjops.ooboffs)
			adjops.ooblen = oobavail - adjops.ooboffs;

		if (read) {
			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
			if (ret > 0)
				max_bitflips = max(max_bitflips, ret);
		} else {
			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
		}

		if (ret < 0)
			return ret;

		max_bitflips = max(max_bitflips, ret);
		ops->retlen += adjops.retlen;
		ops->oobretlen += adjops.oobretlen;
		adjops.datbuf += adjops.retlen;
		adjops.oobbuf += adjops.oobretlen;
		adjops.ooboffs = 0;
		pageofs = 0;
		info.pair++;
	}

	return max_bitflips;
}
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_ecc_stats old_stats = master->ecc_stats;
	int ret_code;

	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_read */
	if (!master->_read_oob && (!master->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (ops->stats)
		memset(ops->stats, 0, sizeof(*ops->stats));

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
	else
		ret_code = mtd_read_oob_std(mtd, from, ops);

	mtd_update_ecc_stats(mtd, master, &old_stats);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	if (ops->stats)
		ops->stats->max_bitflips = ret_code;
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	ledtrig_mtd_activity();

	/* Check the validity of a potential fallback on mtd->_write */
	if (!master->_write_oob && (!master->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		return mtd_io_emulated_slc(mtd, to, false, ops);

	return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
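/*
 * Usage sketch (illustrative only): reading one page plus its available OOB
 * bytes in a single call. 'databuf' and 'oobbuf' are assumed to be buffers of
 * at least mtd->writesize and mtd->oobavail bytes respectively.
 *
 *	struct mtd_oob_ops ops = { };
 *	int err;
 *
 *	ops.mode = MTD_OPS_AUTO_OOB;
 *	ops.len = mtd->writesize;
 *	ops.datbuf = databuf;
 *	ops.ooblen = mtd->oobavail;
 *	ops.oobbuf = oobbuf;
 *	err = mtd_read_oob(mtd, 0, &ops);
 */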
/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single ECC
 *	     chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobecc, 0, sizeof(*oobecc));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->ecc)
		return -ENOTSUPP;

	return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	struct mtd_info *master = mtd_get_master(mtd);

	memset(oobfree, 0, sizeof(*oobfree));

	if (!master || section < 0)
		return -EINVAL;

	if (!master->ooblayout || !master->ooblayout->free)
		return -ENOTSUPP;

	return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
				     int *sectionp, struct mtd_oob_region *oobregion,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	int pos = 0, ret, section = 0;

	memset(oobregion, 0, sizeof(*oobregion));

	while (1) {
		ret = iter(mtd, section, oobregion);
		if (ret)
			return ret;

		if (pos + oobregion->length > byte)
			break;

		pos += oobregion->length;
		section++;
	}

	/*
	 * Adjust region info to make it start at the beginning of the
	 * requested byte.
	 */
	oobregion->offset += byte - pos;
	oobregion->length -= byte - pos;
	*sectionp = section;

	return 0;
}
/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
				   const u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(buf, oobbuf + oobregion.offset, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}
/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
				   u8 *oobbuf, int start, int nbytes,
				   int (*iter)(struct mtd_info *,
					       int section,
					       struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_region(mtd, start, &section,
					&oobregion, iter);

	while (!ret) {
		int cnt;

		cnt = min_t(int, nbytes, oobregion.length);
		memcpy(oobbuf + oobregion.offset, buf, cnt);
		buf += cnt;
		nbytes -= cnt;

		if (!nbytes)
			break;

		ret = iter(mtd, ++section, &oobregion);
	}

	return ret;
}
/**
 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
				     int (*iter)(struct mtd_info *,
						 int section,
						 struct mtd_oob_region *oobregion))
{
	struct mtd_oob_region oobregion;
	int section = 0, ret, nbytes = 0;

	while (1) {
		ret = iter(mtd, section++, &oobregion);
		if (ret) {
			if (ret == -ERANGE)
				ret = nbytes;
			break;
		}

		nbytes += oobregion.length;
	}

	return ret;
}
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the number of free bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the number of ECC bytes on success, a negative error code
 * otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
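/*
 * Usage sketch (illustrative only): walking every ECC section of a layout in
 * the iteration style the kernel-doc above describes (stop on -ERANGE).
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &region)) {
 *		pr_info("ECC section %d: offset %u, length %u\n",
 *			section, region.offset, region.length);
 *		section++;
 *	}
 */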
/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!master->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const u_char *buf)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	*retlen = 0;
	if (!master->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_erase_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (!master->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
	}

	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isreserved)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);

	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!master->_block_isbad)
		return 0;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	if (!master->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
	if (ret)
		return ret;

	while (mtd->parent) {
		mtd->ecc_stats.badblocks++;
		mtd = mtd->parent;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}
/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_info *master = mtd_get_master(mtd);

	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (!master->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);

	return master->_writev(master, vecs, count,
			       mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
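/*
 * Usage sketch (illustrative only): writing a header and a payload that live
 * in separate buffers with one call. 'hdr', 'data' and 'data_len' are assumed
 * caller context.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = sizeof(*hdr) },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), 0, &retlen);
 */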
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
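/*
 * Usage sketch (illustrative only): asking for a whole eraseblock but
 * accepting a smaller, writesize-aligned buffer under memory pressure; on
 * success 'size' holds the size actually allocated.
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	use 'buf' for up to 'size' bytes of I/O, then:
 *	kfree(buf);
 */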
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
#endif /* CONFIG_PROC_FS */
/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
	struct backing_dev_info *bdi;
	int ret;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);
	bdi->ra_pages = 0;
	bdi->io_pages = 0;

	/*
	 * We put '-0' suffix to the name to get the same name format as we
	 * used to get. Since this is called only once, we get a unique name.
	 */
	ret = bdi_register(bdi, "%.28s-0", name);
	if (ret)
		bdi_put(bdi);

	return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	mtd_bdi = mtd_bdi_init("mtd");
	if (IS_ERR(mtd_bdi)) {
		ret = PTR_ERR(mtd_bdi);
		goto err_bdi;
	}

	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
	debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
			    &mtd_expert_analysis_mode);

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	debugfs_remove_recursive(dfs_dir_mtd);
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_unregister(mtd_bdi);
	bdi_put(mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");