/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>
/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
    struct mtd_info mtd;
    int num_subdev;
    struct mtd_info **subdev;
};
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
    ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
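/*
 * Illustrative sketch (not part of the driver): the macro above sizes a
 * single allocation that holds both the mtd_concat header and the subdev
 * pointer array placed directly behind it, so one kmalloc()/kfree() pair
 * manages both:
 *
 *    struct mtd_concat *c = kmalloc(SIZEOF_STRUCT_MTD_CONCAT(n), GFP_KERNEL);
 *    c->subdev = (struct mtd_info **)(c + 1);    (array follows the struct)
 */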
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
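/*
 * Note (illustrative only): the cast in CONCAT() is valid because mtd is
 * the *first* member of struct mtd_concat, so the struct mtd_info pointer
 * the MTD core passes back into our methods also points at the start of
 * the enclosing structure. A hypothetical helper would simply do:
 *
 *    static int concat_subdev_count(struct mtd_info *mtd)
 *    {
 *        return CONCAT(mtd)->num_subdev;
 *    }
 */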
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
            size_t * retlen, u_char * buf)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int ret = 0, err = -EINVAL;
    int i;

    *retlen = 0;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        size_t size, retsize;

        if (from >= subdev->size) {
            /* Not destined for this subdev */
            size = 0;
            from -= subdev->size;
            continue;
        }
        if (from + len > subdev->size)
            /* First part goes into this subdev */
            size = subdev->size - from;
        else
            /* Entire transaction goes into this subdev */
            size = len;

        err = subdev->read(subdev, from, size, &retsize, buf);

        if (err && (err != -EBADMSG) && (err != -EUCLEAN))
            break;

        /* Save information about bitflips! */
        if (err) {
            /* do not let a later -EUCLEAN overwrite -EBADMSG */
            if (ret != -EBADMSG)
                ret = err;
            err = 0;
        }

        *retlen += retsize;
        len -= size;
        if (len == 0)
            break;

        err = -EINVAL;
        buf += size;
        from = 0;
    }
    return err ? err : ret;
}
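/*
 * Worked example (illustrative, not part of the driver): with two
 * subdevices of 1 MiB and 2 MiB, a 0x1000-byte read at offset 0x180000
 * skips subdev 0 (from becomes 0x180000 - 0x100000 = 0x80000) and is
 * issued to subdev 1 at offset 0x80000. A read straddling the boundary
 * is split: the first chunk ends at subdev 0's end, then 'from' is
 * reset to 0 and the remainder goes to subdev 1.
 */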
static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
             size_t * retlen, const u_char * buf)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int err = -EINVAL;
    int i;

    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    *retlen = 0;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        size_t size, retsize;

        if (to >= subdev->size) {
            /* Not destined for this subdev */
            size = 0;
            to -= subdev->size;
            continue;
        }
        if (to + len > subdev->size)
            size = subdev->size - to;
        else
            size = len;

        if (!(subdev->flags & MTD_WRITEABLE))
            err = -EROFS;
        else
            err = subdev->write(subdev, to, size, &retsize, buf);

        if (err)
            break;

        *retlen += retsize;
        len -= size;
        if (len == 0)
            break;

        err = -EINVAL;
        buf += size;
        to = 0;
    }
    return err;
}
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
              unsigned long count, loff_t to, size_t * retlen)
{
    struct mtd_concat *concat = CONCAT(mtd);
    struct kvec *vecs_copy;
    unsigned long entry_low, entry_high;
    size_t total_len = 0;
    int i;
    int err = -EINVAL;

    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    *retlen = 0;

    /* Calculate total length of data */
    for (i = 0; i < count; i++)
        total_len += vecs[i].iov_len;

    /* Do not allow write past end of device */
    if ((to + total_len) > mtd->size)
        return -EINVAL;

    /* Check alignment */
    if (mtd->writesize > 1) {
        loff_t __to = to;
        if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
            return -EINVAL;
    }

    /* make a copy of vecs */
    vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
    if (!vecs_copy)
        return -ENOMEM;
    memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

    entry_low = 0;
    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        size_t size, wsize, retsize, old_iov_len;

        if (to >= subdev->size) {
            to -= subdev->size;
            continue;
        }

        size = min(total_len, (size_t)(subdev->size - to));
        wsize = size;	/* store for future use */

        entry_high = entry_low;
        while (entry_high < count) {
            if (size <= vecs_copy[entry_high].iov_len)
                break;
            size -= vecs_copy[entry_high++].iov_len;
        }

        old_iov_len = vecs_copy[entry_high].iov_len;
        vecs_copy[entry_high].iov_len = size;

        if (!(subdev->flags & MTD_WRITEABLE))
            err = -EROFS;
        else
            err = subdev->writev(subdev, &vecs_copy[entry_low],
                                 entry_high - entry_low + 1, to, &retsize);

        vecs_copy[entry_high].iov_len = old_iov_len - size;
        vecs_copy[entry_high].iov_base += size;

        entry_low = entry_high;

        if (err)
            break;

        *retlen += retsize;
        total_len -= wsize;

        if (total_len == 0)
            break;

        err = -EINVAL;
        to = 0;
    }

    kfree(vecs_copy);
    return err;
}
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
    struct mtd_concat *concat = CONCAT(mtd);
    struct mtd_oob_ops devops = *ops;
    int i, err;

    ops->retlen = 0;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];

        if (from >= subdev->size) {
            from -= subdev->size;
            continue;
        }

        /* partial read ? */
        if (from + devops.len > subdev->size)
            devops.len = subdev->size - from;

        err = subdev->read_oob(subdev, from, &devops);
        ops->retlen += devops.retlen;
        if (err)
            return err;

        devops.len = ops->len - ops->retlen;
        if (!devops.len)
            return 0;

        if (devops.datbuf)
            devops.datbuf += devops.retlen;
        if (devops.oobbuf)
            devops.oobbuf += devops.ooblen;

        from = 0;
    }
    return -EINVAL;
}
static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
    struct mtd_concat *concat = CONCAT(mtd);
    struct mtd_oob_ops devops = *ops;
    int i, err;

    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    ops->retlen = 0;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];

        if (to >= subdev->size) {
            to -= subdev->size;
            continue;
        }

        /* partial write ? */
        if (to + devops.len > subdev->size)
            devops.len = subdev->size - to;

        err = subdev->write_oob(subdev, to, &devops);
        ops->retlen += devops.retlen;
        if (err)
            return err;

        devops.len = ops->len - ops->retlen;
        if (!devops.len)
            return 0;

        if (devops.datbuf)
            devops.datbuf += devops.retlen;
        if (devops.oobbuf)
            devops.oobbuf += devops.ooblen;

        to = 0;
    }
    return -EINVAL;
}
static void concat_erase_callback(struct erase_info *instr)
{
    wake_up((wait_queue_head_t *) instr->priv);
}
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
    int err;
    wait_queue_head_t waitq;
    DECLARE_WAITQUEUE(wait, current);

    /*
     * This code was stol^H^H^H^Hinspired by mtdchar.c
     */
    init_waitqueue_head(&waitq);

    erase->mtd = mtd;
    erase->callback = concat_erase_callback;
    erase->priv = (unsigned long) &waitq;

    /*
     * FIXME: Allow INTERRUPTIBLE. Which means
     * not having the wait_queue head on the stack.
     */
    err = mtd->erase(mtd, erase);
    if (!err) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&waitq, &wait);
        if (erase->state != MTD_ERASE_DONE
            && erase->state != MTD_ERASE_FAILED)
            schedule();
        remove_wait_queue(&waitq, &wait);
        set_current_state(TASK_RUNNING);

        err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
    }
    return err;
}
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
    struct mtd_concat *concat = CONCAT(mtd);
    struct mtd_info *subdev;
    int i, err;
    u_int32_t length, offset = 0;
    struct erase_info *erase;

    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    if (instr->addr > concat->mtd.size)
        return -EINVAL;

    if (instr->len + instr->addr > concat->mtd.size)
        return -EINVAL;

    /*
     * Check for proper erase block alignment of the to-be-erased area.
     * It is easier to do this based on the super device's erase
     * region info rather than looking at each particular sub-device
     * in turn.
     */
    if (!concat->mtd.numeraseregions) {
        /* the easy case: device has uniform erase block size */
        if (instr->addr & (concat->mtd.erasesize - 1))
            return -EINVAL;
        if (instr->len & (concat->mtd.erasesize - 1))
            return -EINVAL;
    } else {
        /* device has variable erase size */
        struct mtd_erase_region_info *erase_regions =
            concat->mtd.eraseregions;

        /*
         * Find the erase region where the to-be-erased area begins:
         */
        for (i = 0; i < concat->mtd.numeraseregions &&
             instr->addr >= erase_regions[i].offset; i++) ;
        --i;

        /*
         * Now erase_regions[i] is the region in which the
         * to-be-erased area begins. Verify that the starting
         * offset is aligned to this region's erase size:
         */
        if (instr->addr & (erase_regions[i].erasesize - 1))
            return -EINVAL;

        /*
         * now find the erase region where the to-be-erased area ends:
         */
        for (; i < concat->mtd.numeraseregions &&
             (instr->addr + instr->len) >= erase_regions[i].offset;
             ++i) ;
        --i;

        /*
         * check if the ending offset is aligned to this region's erase size
         */
        if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
            return -EINVAL;
    }

    instr->fail_addr = 0xffffffff;

    /* make a local copy of instr to avoid modifying the caller's struct */
    erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
    if (!erase)
        return -ENOMEM;

    *erase = *instr;
    length = instr->len;

    /*
     * find the subdevice where the to-be-erased area begins, adjust
     * starting offset to be relative to the subdevice start
     */
    for (i = 0; i < concat->num_subdev; i++) {
        subdev = concat->subdev[i];
        if (subdev->size <= erase->addr) {
            erase->addr -= subdev->size;
            offset += subdev->size;
        } else
            break;
    }

    /* must never happen since size limit has been verified above */
    BUG_ON(i >= concat->num_subdev);

    /* now do the erase: */
    err = 0;
    for (; length > 0; i++) {
        /* loop for all subdevices affected by this request */
        subdev = concat->subdev[i];	/* get current subdevice */

        /* limit length to subdevice's size: */
        if (erase->addr + length > subdev->size)
            erase->len = subdev->size - erase->addr;
        else
            erase->len = length;

        if (!(subdev->flags & MTD_WRITEABLE)) {
            err = -EROFS;
            break;
        }
        length -= erase->len;
        if ((err = concat_dev_erase(subdev, erase))) {
            /* sanity check: should never happen since
             * block alignment has been checked above */
            BUG_ON(err == -EINVAL);
            if (erase->fail_addr != 0xffffffff)
                instr->fail_addr = erase->fail_addr + offset;
            break;
        }
        /*
         * erase->addr specifies the offset of the area to be
         * erased *within the current subdevice*. It can be
         * non-zero only the first time through this loop, i.e.
         * for the first subdevice where blocks need to be erased.
         * All the following erases must begin at the start of the
         * current subdevice, i.e. at offset zero.
         */
        erase->addr = 0;
        offset += subdev->size;
    }
    instr->state = erase->state;
    kfree(erase);
    if (err)
        return err;

    if (instr->callback)
        instr->callback(instr);
    return 0;
}
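/*
 * Worked example (illustrative, not part of the driver): concatenating a
 * chip with 64 KiB erase blocks and one with 8 KiB erase blocks yields a
 * super device with two erase regions. An erase request that starts in
 * the first region must begin on a 64 KiB boundary, and one that ends in
 * the second region must end on an 8 KiB boundary, which is exactly what
 * the region-based alignment check above enforces.
 */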
static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i, err = -EINVAL;

    if ((len + ofs) > mtd->size)
        return -EINVAL;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        size_t size;

        if (ofs >= subdev->size) {
            size = 0;
            ofs -= subdev->size;
            continue;
        }
        if (ofs + len > subdev->size)
            size = subdev->size - ofs;
        else
            size = len;

        err = subdev->lock(subdev, ofs, size);

        if (err)
            break;

        len -= size;
        if (len == 0)
            break;

        err = -EINVAL;
        ofs = 0;
    }

    return err;
}
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i, err = 0;

    if ((len + ofs) > mtd->size)
        return -EINVAL;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        size_t size;

        if (ofs >= subdev->size) {
            size = 0;
            ofs -= subdev->size;
            continue;
        }
        if (ofs + len > subdev->size)
            size = subdev->size - ofs;
        else
            size = len;

        err = subdev->unlock(subdev, ofs, size);

        if (err)
            break;

        len -= size;
        if (len == 0)
            break;

        err = -EINVAL;
        ofs = 0;
    }

    return err;
}
static void concat_sync(struct mtd_info *mtd)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        subdev->sync(subdev);
    }
}
static int concat_suspend(struct mtd_info *mtd)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i, rc = 0;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        if ((rc = subdev->suspend(subdev)) < 0)
            return rc;
    }
    return rc;
}
static void concat_resume(struct mtd_info *mtd)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];
        subdev->resume(subdev);
    }
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i, res = 0;

    if (!concat->subdev[0]->block_isbad)
        return res;

    if (ofs > mtd->size)
        return -EINVAL;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];

        if (ofs >= subdev->size) {
            ofs -= subdev->size;
            continue;
        }

        res = subdev->block_isbad(subdev, ofs);
        break;
    }

    return res;
}
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
    struct mtd_concat *concat = CONCAT(mtd);
    int i, err = -EINVAL;

    if (!concat->subdev[0]->block_markbad)
        return 0;

    if (ofs > mtd->size)
        return -EINVAL;

    for (i = 0; i < concat->num_subdev; i++) {
        struct mtd_info *subdev = concat->subdev[i];

        if (ofs >= subdev->size) {
            ofs -= subdev->size;
            continue;
        }

        err = subdev->block_markbad(subdev, ofs);
        break;
    }

    return err;
}
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
                                   int num_devs,	/* number of subdevices */
                                   char *name)	/* name for the new device */
{
    int i;
    size_t size;
    struct mtd_concat *concat;
    u_int32_t max_erasesize, curr_erasesize;
    int num_erase_region;
    printk(KERN_NOTICE "Concatenating MTD devices:\n");
    for (i = 0; i < num_devs; i++)
        printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
    printk(KERN_NOTICE "into device \"%s\"\n", name);

    /* allocate the device structure */
    size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
    concat = kmalloc(size, GFP_KERNEL);
    if (!concat) {
        printk
            ("memory allocation error while creating concatenated device \"%s\"\n",
             name);
        return NULL;
    }
    memset(concat, 0, size);
    concat->subdev = (struct mtd_info **) (concat + 1);
    /*
     * Set up the new "super" device's MTD object structure, check for
     * incompatibilities between the subdevices.
     */
    concat->mtd.type = subdev[0]->type;
    concat->mtd.flags = subdev[0]->flags;
    concat->mtd.size = subdev[0]->size;
    concat->mtd.erasesize = subdev[0]->erasesize;
    concat->mtd.writesize = subdev[0]->writesize;
    concat->mtd.oobsize = subdev[0]->oobsize;
    concat->mtd.ecctype = subdev[0]->ecctype;
    concat->mtd.eccsize = subdev[0]->eccsize;
    if (subdev[0]->writev)
        concat->mtd.writev = concat_writev;
    if (subdev[0]->read_oob)
        concat->mtd.read_oob = concat_read_oob;
    if (subdev[0]->write_oob)
        concat->mtd.write_oob = concat_write_oob;
    if (subdev[0]->block_isbad)
        concat->mtd.block_isbad = concat_block_isbad;
    if (subdev[0]->block_markbad)
        concat->mtd.block_markbad = concat_block_markbad;

    concat->subdev[0] = subdev[0];
    for (i = 1; i < num_devs; i++) {
        if (concat->mtd.type != subdev[i]->type) {
            kfree(concat);
            printk("Incompatible device type on \"%s\"\n",
                   subdev[i]->name);
            return NULL;
        }
        if (concat->mtd.flags != subdev[i]->flags) {
            /*
             * Expect all flags except MTD_WRITEABLE to be
             * equal on all subdevices.
             */
            if ((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE) {
                kfree(concat);
                printk("Incompatible device flags on \"%s\"\n",
                       subdev[i]->name);
                return NULL;
            } else
                /* if writeable attribute differs,
                   make super device writeable */
                concat->mtd.flags |=
                    subdev[i]->flags & MTD_WRITEABLE;
        }
        concat->mtd.size += subdev[i]->size;
        if (concat->mtd.writesize != subdev[i]->writesize ||
            concat->mtd.oobsize != subdev[i]->oobsize ||
            concat->mtd.ecctype != subdev[i]->ecctype ||
            concat->mtd.eccsize != subdev[i]->eccsize ||
            !concat->mtd.read_oob != !subdev[i]->read_oob ||
            !concat->mtd.write_oob != !subdev[i]->write_oob) {
            kfree(concat);
            printk("Incompatible OOB or ECC data on \"%s\"\n",
                   subdev[i]->name);
            return NULL;
        }
        concat->subdev[i] = subdev[i];
    }
    concat->mtd.ecclayout = subdev[0]->ecclayout;

    concat->num_subdev = num_devs;
    concat->mtd.name = name;

    concat->mtd.erase = concat_erase;
    concat->mtd.read = concat_read;
    concat->mtd.write = concat_write;
    concat->mtd.sync = concat_sync;
    concat->mtd.lock = concat_lock;
    concat->mtd.unlock = concat_unlock;
    concat->mtd.suspend = concat_suspend;
    concat->mtd.resume = concat_resume;
    /*
     * Combine the erase block size info of the subdevices:
     *
     * first, walk the map of the new device and see how
     * many changes in erase size we have
     */
    max_erasesize = curr_erasesize = subdev[0]->erasesize;
    num_erase_region = 1;
    for (i = 0; i < num_devs; i++) {
        if (subdev[i]->numeraseregions == 0) {
            /* current subdevice has uniform erase size */
            if (subdev[i]->erasesize != curr_erasesize) {
                /* if it differs from the last subdevice's erase size, count it */
                ++num_erase_region;
                curr_erasesize = subdev[i]->erasesize;
                if (curr_erasesize > max_erasesize)
                    max_erasesize = curr_erasesize;
            }
        } else {
            /* current subdevice has variable erase size */
            int j;
            for (j = 0; j < subdev[i]->numeraseregions; j++) {
                /* walk the list of erase regions, count any changes */
                if (subdev[i]->eraseregions[j].erasesize !=
                    curr_erasesize) {
                    ++num_erase_region;
                    curr_erasesize =
                        subdev[i]->eraseregions[j].erasesize;
                    if (curr_erasesize > max_erasesize)
                        max_erasesize = curr_erasesize;
                }
            }
        }
    }
    if (num_erase_region == 1) {
        /*
         * All subdevices have the same uniform erase size.
         * This is easy:
         */
        concat->mtd.erasesize = curr_erasesize;
        concat->mtd.numeraseregions = 0;
    } else {
        /*
         * erase block size varies across the subdevices: allocate
         * space to store the data describing the variable erase regions
         */
        struct mtd_erase_region_info *erase_region_p;
        u_int32_t begin, position;

        concat->mtd.erasesize = max_erasesize;
        concat->mtd.numeraseregions = num_erase_region;
        concat->mtd.eraseregions = erase_region_p =
            kmalloc(num_erase_region *
                    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
        if (!erase_region_p) {
            kfree(concat);
            printk
                ("memory allocation error while creating erase region list"
                 " for device \"%s\"\n", name);
            return NULL;
        }

        /*
         * walk the map of the new device once more and fill in
         * erase region info:
         */
        curr_erasesize = subdev[0]->erasesize;
        begin = position = 0;
        for (i = 0; i < num_devs; i++) {
            if (subdev[i]->numeraseregions == 0) {
                /* current subdevice has uniform erase size */
                if (subdev[i]->erasesize != curr_erasesize) {
                    /*
                     * fill in an mtd_erase_region_info structure for the area
                     * we have walked so far:
                     */
                    erase_region_p->offset = begin;
                    erase_region_p->erasesize = curr_erasesize;
                    erase_region_p->numblocks =
                        (position - begin) / curr_erasesize;
                    begin = position;

                    curr_erasesize = subdev[i]->erasesize;
                    ++erase_region_p;
                }
                position += subdev[i]->size;
            } else {
                /* current subdevice has variable erase size */
                int j;
                for (j = 0; j < subdev[i]->numeraseregions; j++) {
                    /* walk the list of erase regions, count any changes */
                    if (subdev[i]->eraseregions[j].erasesize !=
                        curr_erasesize) {
                        erase_region_p->offset = begin;
                        erase_region_p->erasesize = curr_erasesize;
                        erase_region_p->numblocks =
                            (position - begin) / curr_erasesize;
                        begin = position;

                        curr_erasesize =
                            subdev[i]->eraseregions[j].erasesize;
                        ++erase_region_p;
                    }
                    position += subdev[i]->eraseregions[j].numblocks *
                        curr_erasesize;
                }
            }
        }
        /* Now write the final entry */
        erase_region_p->offset = begin;
        erase_region_p->erasesize = curr_erasesize;
        erase_region_p->numblocks = (position - begin) / curr_erasesize;
    }

    return &concat->mtd;
}
/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
    struct mtd_concat *concat = CONCAT(mtd);
    if (concat->mtd.numeraseregions)
        kfree(concat->mtd.eraseregions);
    kfree(concat);
}
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");
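/*
 * Usage sketch (illustrative only; the device count and the "board-flash"
 * name below are assumptions): a board driver that has probed two flash
 * chips could stack them and register the result itself, since
 * mtd_concat_create() deliberately does not register anything:
 *
 *    struct mtd_info *parts[2];    (filled in by the map/NAND driver)
 *    struct mtd_info *merged;
 *
 *    merged = mtd_concat_create(parts, 2, "board-flash");
 *    if (merged)
 *        add_mtd_device(merged);
 *
 * Teardown reverses the steps: del_mtd_device(merged), then
 * mtd_concat_destroy(merged).
 */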