/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the *flash device*
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}
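
/*
 * Illustrative sketch (not part of the driver): because struct mtd_part
 * embeds its struct mtd_info by value, container_of() can map any
 * partition's mtd_info back to the enclosing bookkeeping node without a
 * lookup table. A hypothetical helper using the pattern:
 *
 *	static loff_t part_to_parent_ofs(struct mtd_info *mtd, loff_t ofs)
 *	{
 *		struct mtd_part *part = mtd_to_part(mtd);
 *
 *		return ofs + part->offset;
 *	}
 */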

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}
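
/*
 * Worked example (assumed numbers, for illustration only): for a partition
 * at part->offset = 0x20000, a part_read() at from = 0x100 is issued to the
 * parent device at 0x20100. The parent's ECC counters are snapshotted before
 * the read so that only the delta caused by this read is credited to the
 * partition's own ecc_stats, not the device's running totals.
 */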

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = mtd_to_part(mtd);

	offset += part->offset;
	return part->parent->_get_unmapped_area(part->parent, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		len = mtd_oobavail(mtd, ops);
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = mtd_to_part(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}

	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_unlock(part->parent, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/**
 * mtd_parse_part - parse MTD partition looking for subpartitions
 *
 * @slave: part that is supposed to be a container and should be parsed
 * @types: NULL-terminated array with names of partition parsers to try
 *
 * Some partitions are kinds of containers with extra subpartitions (volumes).
 * There can be various formats of such containers. This function tries the
 * specified parsers to analyze the given partition and registers any
 * subpartitions it finds.
 */
static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
{
	struct mtd_partitions parsed;
	int err;

	err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
	if (err)
		return err;
	else if (!parsed.nr_parts)
		return -ENOENT;

	err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);

	mtd_part_parser_cleanup(&parsed);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}

	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;
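
	/*
	 * Worked example (assumed numbers, for illustration only): with
	 * wr_alignment = 0x20000 (a 128 KiB eraseblock), a partition at
	 * offset 0x30000 leaves do_div(0x30000, 0x20000) a remainder of
	 * 0x10000, so the checks below strip MTD_WRITEABLE and force the
	 * partition read-only.
	 */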

	tmp = slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
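
/*
 * Usage sketch (illustrative only, not part of this file): a driver or
 * ioctl path can carve a partition out of a registered device at runtime.
 * The name, offset, and length here are made-up values:
 *
 *	err = mtd_add_partition(master, "env", 0x40000, 0x20000);
 *	if (err)
 *		pr_err("cannot add partition: %d\n", err);
 */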

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
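
/*
 * Usage sketch (illustrative only): deleting the partition registered with
 * a given index on a parent device. The index value is made up:
 *
 *	err = mtd_del_partition(master, 3);
 *	if (err == -EINVAL)
 *		pr_err("no such partition\n");
 */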

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);
		if (parts[i].types)
			mtd_parse_part(slave, parts[i].types);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
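
/*
 * Usage sketch (illustrative only): a caller typically builds a static
 * table and hands it to the partitioning core. The names, offsets, and
 * sizes below are made-up values:
 *
 *	static const struct mtd_partition board_parts[] = {
 *		{ .name = "boot",   .offset = 0,       .size = 0x40000 },
 *		{ .name = "rootfs", .offset = 0x40000, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 *
 * MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK may be used as offsets to place
 * a partition right after (or eraseblock-aligned after) the previous one.
 */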

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);
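
/*
 * Sketch of a minimal parser registration (illustrative only; the parser
 * name and parse function are hypothetical). Parsers normally use the
 * register_mtd_parser() wrapper, which passes THIS_MODULE as @owner:
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "examplepart",
 *		.parse_fn = examplepart_parse,
 *	};
 *	register_mtd_parser(&example_parser);
 *
 * With no .cleanup provided, the core falls back to
 * mtd_part_parser_cleanup_default(), which kfree()s the parser's
 * kmalloc'd partition array in one chunk.
 */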

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: info about partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero otherwise, and @pparts will describe the partitions, number of
 *   partitions, and the parser which parsed them. Caller must release
 *   resources with mtd_part_parser_cleanup() when finished with the returned
 *   data.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partitions *pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; *types; types++) {
		pr_debug("%s: parsing partitions %s\n", master->name, *types);
		parser = mtd_part_parser_get(*types);
		if (!parser && !request_module("%s", *types))
			parser = mtd_part_parser_get(*types);
		pr_debug("%s: got parser %s\n", master->name,
			 parser ? parser->name : NULL);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, data);
		/* Found partitions! */
		if (ret > 0)
			return 0;
		mtd_part_parser_put(parser);
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds.
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}
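
/*
 * Usage sketch (illustrative only): trying a caller-chosen parser list and
 * cleaning up afterwards. The probe_types array is a made-up example:
 *
 *	static const char * const probe_types[] = { "cmdlinepart", NULL };
 *	struct mtd_partitions parsed = { };
 *
 *	if (!parse_mtd_partitions(master, probe_types, &parsed, NULL) &&
 *	    parsed.nr_parts) {
 *		add_mtd_partitions(master, parsed.parts, parsed.nr_parts);
 *		mtd_part_parser_cleanup(&parsed);
 *	}
 */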

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
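
/*
 * Worked example (assumed layout, for illustration only): with a 128 MiB
 * chip split into "boot" and "rootfs", and "rootfs" further split into
 * subpartitions, calling mtd_get_device_size() on any of them walks the
 * parent chain (subpartition -> rootfs -> chip) and returns 128 MiB, while
 * mtd->size on the subpartition itself reports only that slice.
 */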