/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */
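
/*
 * part_read() forwards the request to the master device at the translated
 * offset, then charges any new ECC failures or corrections seen during the
 * call to this partition's own ecc_stats by diffing the master's counters
 * taken before and after the read.
 */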
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}
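
/*
 * OOB reads are bounds-checked against the partition: the data length must
 * fit below mtd->size, and when an OOB buffer is supplied, ooboffs + ooblen
 * must fit in the OOB space of the pages remaining in this partition.
 */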
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
		size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
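
/*
 * Erase requests are rewritten to master offsets on the way in; if the
 * master fails synchronously, addr and fail_addr are translated back so the
 * caller always sees partition-relative addresses.
 */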
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}
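
/*
 * Completion hook for erase requests: when the erase was issued through a
 * partition, translate addr and fail_addr back to partition-relative values
 * before invoking the caller's callback.
 */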
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}
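
/* Release a slave mtd_part together with the name string duplicated for it. */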
static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}
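
/*
 * Build the slave mtd_info for a single partition: copy the relevant master
 * parameters, hook up only the part_*() methods the master actually
 * provides, resolve the special MTDPART_OFS_APPEND/NXTBLK/RETAIN and
 * MTDPART_SIZ_FULL values, and sanity-check the resulting offset and size
 * against the master device.
 */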
static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
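
/*
 * mtd_add_partition() creates a single partition at run time. The caller
 * must pass an absolute offset (MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK
 * are rejected), and the request fails with -EINVAL if the new partition's
 * start or end falls inside an existing partition of the same master.
 */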
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
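
/*
 * mtd_del_partition() removes the partition of @master whose MTD index is
 * @partno, unregistering its device and freeing the mtd_part.
 */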
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
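
/*
 * Registry of partition table parsers ("cmdlinepart", "ofpart", ...),
 * protected by part_parser_lock.
 */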
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
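
/*
 * Parser modules register themselves here so that parse_mtd_partitions()
 * can look them up by name.
 */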
void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
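
/* Return 1 if @mtd is a slave partition created by this layer, 0 otherwise. */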
int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);