/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 */
#include <linux/mtd/mtd.h>
#include <linux/compat.h>
#include <linux/mtd/concat.h>
#include <ubi_uboot.h>
/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
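
/*
 * Illustrative sketch (not part of the driver): the subdevice pointer array
 * is allocated in the same block as struct mtd_concat, and the embedded
 * mtd_info is the first member, so the create path can carve both out of a
 * single allocation and CONCAT() can later recover the enclosing object from
 * the mtd_info handed to the MTD methods.  Assuming a kzalloc()-style
 * allocator is available:
 *
 *	size_t size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
 *	struct mtd_concat *concat = kzalloc(size, GFP_KERNEL);
 *
 *	concat->subdev = (struct mtd_info **)(concat + 1);
 *	// any method handed &concat->mtd can then do:
 *	struct mtd_concat *c = CONCAT(&concat->mtd);
 */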
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (mtd_is_eccerr(err)) {
			mtd->ecc_stats.failed++;
			ret = err;
		} else if (mtd_is_bitflip(err)) {
			mtd->ecc_stats.corrected++;
			/* Do not overwrite -EBADMSG !! */
			if (!ret)
				ret = err;
		} else if (err) {
			return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}

	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}

	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (mtd_is_eccerr(err)) {
			mtd->ecc_stats.failed++;
			ret = err;
		} else if (mtd_is_bitflip(err)) {
			mtd->ecc_stats.corrected++;
			/* Do not overwrite -EBADMSG !! */
			if (!ret)
				ret = err;
		} else if (err) {
			return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}

	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}

	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Nothing to do here in U-Boot */
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * independently.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;

		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) &
		    (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}

		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		debug("(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position, tmp64;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}