/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
        size_t size;

        size = sizeof(struct ubi_fm_hdr) + \
                sizeof(struct ubi_fm_scan_pool) + \
                sizeof(struct ubi_fm_scan_pool) + \
                (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
                (sizeof(struct ubi_fm_eba) + \
                (ubi->peb_count * sizeof(__be32))) + \
                sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
        return roundup(size, ubi->leb_size);
}
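
/*
 * Worked example (numbers assumed for illustration): with peb_count = 1024,
 * the two per-PEB terms above contribute 1024 * (sizeof(struct ubi_fm_ec) +
 * sizeof(__be32)) bytes on top of the fixed headers and the UBI_MAX_VOLUMES
 * volume headers; the sum is then rounded up to a whole multiple of
 * leb_size, so the fastmap usually occupies only one or a few LEBs.
 */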

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
        struct ubi_vid_hdr *new;

        new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!new)
                goto out;

        new->vol_type = UBI_VID_DYNAMIC;
        new->vol_id = cpu_to_be32(vol_id);

        /* UBI implementations without fastmap support have to delete the
         * fastmap.
         */
        new->compat = UBI_COMPAT_DELETE;

out:
        return new;
}
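
/*
 * Both internal fastmap volumes get such a header: ubi_write_fastmap() below
 * allocates one for UBI_FM_SB_VOLUME_ID (the anchor PEB) and one for
 * UBI_FM_DATA_VOLUME_ID (the remaining fastmap blocks).
 */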

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
                   int pnum, int ec, int scrub)
{
        struct ubi_ainf_peb *aeb;

        aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
        if (!aeb)
                return -ENOMEM;

        aeb->pnum = pnum;
        aeb->ec = ec;
        aeb->lnum = -1;
        aeb->scrub = scrub;
        aeb->copy_flag = aeb->sqnum = 0;

        ai->ec_sum += aeb->ec;
        ai->ec_count++;

        if (ai->max_ec < aeb->ec)
                ai->max_ec = aeb->ec;

        if (ai->min_ec > aeb->ec)
                ai->min_ec = aeb->ec;

        list_add_tail(&aeb->u.list, list);

        return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
                                       int used_ebs, int data_pad, u8 vol_type,
                                       int last_eb_bytes)
{
        struct ubi_ainf_volume *av;
        struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

        while (*p) {
                parent = *p;
                av = rb_entry(parent, struct ubi_ainf_volume, rb);

                if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;
                else if (vol_id < av->vol_id)
                        p = &(*p)->rb_right;
        }

        av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
        if (!av)
                goto out;

        av->highest_lnum = av->leb_count = 0;
        av->vol_id = vol_id;
        av->used_ebs = used_ebs;
        av->data_pad = data_pad;
        av->last_data_size = last_eb_bytes;
        av->compat = 0;
        av->vol_type = vol_type;
        av->root = RB_ROOT;

        dbg_bld("found volume (ID %i)", vol_id);

        rb_link_node(&av->rb, parent, p);
        rb_insert_color(&av->rb, &ai->volumes);

out:
        return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
                             struct ubi_ainf_peb *aeb,
                             struct ubi_ainf_volume *av)
{
        struct ubi_ainf_peb *tmp_aeb;
        struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

        p = &av->root.rb_node;
        while (*p) {
                parent = *p;

                tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
                if (aeb->lnum != tmp_aeb->lnum) {
                        if (aeb->lnum < tmp_aeb->lnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;

                        continue;
                } else
                        break;
        }

        list_del(&aeb->u.list);
        av->leb_count++;

        rb_link_node(&aeb->u.rb, parent, p);
        rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
                      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
                      struct ubi_ainf_peb *new_aeb)
{
        struct rb_node **p = &av->root.rb_node, *parent = NULL;
        struct ubi_ainf_peb *aeb, *victim;
        int cmp_res;

        while (*p) {
                parent = *p;
                aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

                if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
                        if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;

                        continue;
                }

                /* This case can happen if the fastmap gets written
                 * because of a volume change (creation, deletion, ..).
                 * Then a PEB can be within the persistent EBA and the pool.
                 */
                if (aeb->pnum == new_aeb->pnum) {
                        ubi_assert(aeb->lnum == new_aeb->lnum);
                        kmem_cache_free(ai->aeb_slab_cache, new_aeb);

                        return 0;
                }

                cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
                if (cmp_res < 0)
                        return cmp_res;

                /* new_aeb is newer */
                if (cmp_res & 1) {
                        victim = kmem_cache_alloc(ai->aeb_slab_cache,
                                GFP_KERNEL);
                        if (!victim)
                                return -ENOMEM;

                        victim->ec = aeb->ec;
                        victim->pnum = aeb->pnum;
                        list_add_tail(&victim->u.list, &ai->erase);

                        if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
                                av->last_data_size = \
                                        be32_to_cpu(new_vh->data_size);

                        dbg_bld("vol %i: AEB %i's PEB %i is the newer",
                                av->vol_id, aeb->lnum, new_aeb->pnum);

                        aeb->ec = new_aeb->ec;
                        aeb->pnum = new_aeb->pnum;
                        aeb->copy_flag = new_vh->copy_flag;
                        aeb->scrub = new_aeb->scrub;
                        kmem_cache_free(ai->aeb_slab_cache, new_aeb);

                /* new_aeb is older */
                } else {
                        dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
                                av->vol_id, aeb->lnum, new_aeb->pnum);
                        list_add_tail(&new_aeb->u.list, &ai->erase);
                }

                return 0;
        }
        /* This LEB is new, let's add it to the volume */

        if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
                av->highest_lnum = be32_to_cpu(new_vh->lnum);
                av->last_data_size = be32_to_cpu(new_vh->data_size);
        }

        if (av->vol_type == UBI_STATIC_VOLUME)
                av->used_ebs = be32_to_cpu(new_vh->used_ebs);

        av->leb_count++;

        rb_link_node(&new_aeb->u.rb, parent, p);
        rb_insert_color(&new_aeb->u.rb, &av->root);

        return 0;
}
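
/*
 * In short: a pool PEB that already matches the EBA entry is dropped as a
 * duplicate; otherwise ubi_compare_lebs() decides which copy of the LEB is
 * newer and the losing PEB is queued on &ai->erase; a LEB not yet present
 * in the volume is simply inserted into the RB tree.
 */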

/**
 * process_pool_aeb - process a non-empty PEB found in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                            struct ubi_vid_hdr *new_vh,
                            struct ubi_ainf_peb *new_aeb)
{
        struct ubi_ainf_volume *av, *tmp_av = NULL;
        struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
        int found = 0;

        if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
            be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
                kmem_cache_free(ai->aeb_slab_cache, new_aeb);

                return 0;
        }

        /* Find the volume this SEB belongs to */
        while (*p) {
                parent = *p;
                tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

                if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
                        p = &(*p)->rb_left;
                else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
                        p = &(*p)->rb_right;
                else {
                        found = 1;
                        break;
                }
        }

        if (found)
                av = tmp_av;
        else {
                ubi_err("orphaned volume in fastmap pool!");
                return UBI_BAD_FASTMAP;
        }

        ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

        return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
        struct ubi_ainf_volume *av;
        struct rb_node *node, *node2;
        struct ubi_ainf_peb *aeb;

        for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
                av = rb_entry(node, struct ubi_ainf_volume, rb);

                for (node2 = rb_first(&av->root); node2;
                     node2 = rb_next(node2)) {
                        aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
                        if (aeb->pnum == pnum) {
                                rb_erase(&aeb->u.rb, &av->root);
                                kmem_cache_free(ai->aeb_slab_cache, aeb);
                                return;
                        }
                }
        }
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @eba_orphans: list of PEBs which need to be scanned
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                     int *pebs, int pool_size, unsigned long long *max_sqnum,
                     struct list_head *eba_orphans, struct list_head *free)
{
        struct ubi_vid_hdr *vh;
        struct ubi_ec_hdr *ech;
        struct ubi_ainf_peb *new_aeb, *tmp_aeb;
        int i, pnum, err, found_orphan, ret = 0;

        ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
        if (!ech)
                return -ENOMEM;

        vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vh) {
                kfree(ech);
                return -ENOMEM;
        }

        dbg_bld("scanning fastmap pool: size = %i", pool_size);

        /*
         * Now scan all PEBs in the pool to find changes which have been made
         * after the creation of the fastmap
         */
        for (i = 0; i < pool_size; i++) {
                int scrub = 0;
                int image_seq;

                pnum = be32_to_cpu(pebs[i]);

                if (ubi_io_is_bad(ubi, pnum)) {
                        ubi_err("bad PEB in fastmap pool!");
                        ret = UBI_BAD_FASTMAP;
                        goto out;
                }

                err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
                if (err && err != UBI_IO_BITFLIPS) {
                        ubi_err("unable to read EC header! PEB:%i err:%i",
                                pnum, err);
                        ret = err > 0 ? UBI_BAD_FASTMAP : err;
                        goto out;
                } else if (err == UBI_IO_BITFLIPS)
                        scrub = 1;

                /*
                 * Older UBI implementations have image_seq set to zero, so
                 * we shouldn't fail if image_seq == 0.
                 */
                image_seq = be32_to_cpu(ech->image_seq);

                if (image_seq && (image_seq != ubi->image_seq)) {
                        ubi_err("bad image seq: 0x%x, expected: 0x%x",
                                be32_to_cpu(ech->image_seq), ubi->image_seq);
                        ret = UBI_BAD_FASTMAP;
                        goto out;
                }

                err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
                if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
                        unsigned long long ec = be64_to_cpu(ech->ec);

                        unmap_peb(ai, pnum);
                        dbg_bld("Adding PEB to free: %i", pnum);
                        if (err == UBI_IO_FF_BITFLIPS)
                                add_aeb(ai, free, pnum, ec, 1);
                        else
                                add_aeb(ai, free, pnum, ec, 0);
                        continue;
                } else if (err == 0 || err == UBI_IO_BITFLIPS) {
                        dbg_bld("Found non empty PEB:%i in pool", pnum);

                        if (err == UBI_IO_BITFLIPS)
                                scrub = 1;

                        found_orphan = 0;
                        list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
                                if (tmp_aeb->pnum == pnum) {
                                        found_orphan = 1;
                                        break;
                                }
                        }
                        if (found_orphan) {
                                kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
                                list_del(&tmp_aeb->u.list);
                        }

                        new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
                                                   GFP_KERNEL);
                        if (!new_aeb) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        new_aeb->ec = be64_to_cpu(ech->ec);
                        new_aeb->pnum = pnum;
                        new_aeb->lnum = be32_to_cpu(vh->lnum);
                        new_aeb->sqnum = be64_to_cpu(vh->sqnum);
                        new_aeb->copy_flag = vh->copy_flag;
                        new_aeb->scrub = scrub;

                        if (*max_sqnum < new_aeb->sqnum)
                                *max_sqnum = new_aeb->sqnum;

                        err = process_pool_aeb(ubi, ai, vh, new_aeb);
                        if (err) {
                                ret = err > 0 ? UBI_BAD_FASTMAP : err;
                                goto out;
                        }
                } else {
                        /* We are paranoid and fall back to scanning mode */
                        ubi_err("fastmap pool PEBs contains damaged PEBs!");
                        ret = err > 0 ? UBI_BAD_FASTMAP : err;
                        goto out;
                }
        }

out:
        ubi_free_vid_hdr(ubi, vh);
        kfree(ech);
        return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
        struct ubi_ainf_peb *aeb;
        struct ubi_ainf_volume *av;
        struct rb_node *rb1, *rb2;
        int n = 0;

        list_for_each_entry(aeb, &ai->erase, u.list)
                n++;

        list_for_each_entry(aeb, &ai->free, u.list)
                n++;

        ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
                ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
                        n++;

        return n;
}
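
/*
 * ubi_attach_fastmap() uses this count as a sanity check: it has to match
 * ubi->peb_count minus the bad PEBs and the blocks occupied by the fastmap
 * itself, otherwise the fastmap is rejected and UBI falls back to scanning.
 */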

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
                              struct ubi_attach_info *ai,
                              struct ubi_fastmap_layout *fm)
{
        struct list_head used, eba_orphans, free;
        struct ubi_ainf_volume *av;
        struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
        struct ubi_ec_hdr *ech;
        struct ubi_fm_sb *fmsb;
        struct ubi_fm_hdr *fmhdr;
        struct ubi_fm_scan_pool *fmpl1, *fmpl2;
        struct ubi_fm_ec *fmec;
        struct ubi_fm_volhdr *fmvhdr;
        struct ubi_fm_eba *fm_eba;
        int ret, i, j, pool_size, wl_pool_size;
        size_t fm_pos = 0, fm_size = ubi->fm_size;
        unsigned long long max_sqnum = 0;
        void *fm_raw = ubi->fm_buf;

        INIT_LIST_HEAD(&used);
        INIT_LIST_HEAD(&free);
        INIT_LIST_HEAD(&eba_orphans);
        INIT_LIST_HEAD(&ai->corr);
        INIT_LIST_HEAD(&ai->free);
        INIT_LIST_HEAD(&ai->erase);
        INIT_LIST_HEAD(&ai->alien);
        ai->volumes = RB_ROOT;
        ai->min_ec = UBI_MAX_ERASECOUNTER;

        ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
                                               sizeof(struct ubi_ainf_peb),
                                               0, 0, NULL);
        if (!ai->aeb_slab_cache) {
                ret = -ENOMEM;
                goto fail;
        }

        fmsb = (struct ubi_fm_sb *)(fm_raw);
        ai->max_sqnum = fmsb->sqnum;
        fm_pos += sizeof(struct ubi_fm_sb);
        if (fm_pos >= fm_size)
                goto fail_bad;

        fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmhdr);
        if (fm_pos >= fm_size)
                goto fail_bad;

        if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
                ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
                        be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
                goto fail_bad;
        }

        fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmpl1);
        if (fm_pos >= fm_size)
                goto fail_bad;
        if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
                ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
                        be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
                goto fail_bad;
        }

        fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmpl2);
        if (fm_pos >= fm_size)
                goto fail_bad;
        if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
                ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
                        be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
                goto fail_bad;
        }

        pool_size = be16_to_cpu(fmpl1->size);
        wl_pool_size = be16_to_cpu(fmpl2->size);
        fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
        fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

        if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
                ubi_err("bad pool size: %i", pool_size);
                goto fail_bad;
        }

        if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
                ubi_err("bad WL pool size: %i", wl_pool_size);
                goto fail_bad;
        }

        if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
            fm->max_pool_size < 0) {
                ubi_err("bad maximal pool size: %i", fm->max_pool_size);
                goto fail_bad;
        }

        if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
            fm->max_wl_pool_size < 0) {
                ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
                goto fail_bad;
        }

        /* read EC values from free list */
        for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
                fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fmec);
                if (fm_pos >= fm_size)
                        goto fail_bad;

                add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
                        be32_to_cpu(fmec->ec), 0);
        }

        /* read EC values from used list */
        for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
                fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fmec);
                if (fm_pos >= fm_size)
                        goto fail_bad;

                add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
                        be32_to_cpu(fmec->ec), 0);
        }

        /* read EC values from scrub list */
        for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
                fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fmec);
                if (fm_pos >= fm_size)
                        goto fail_bad;

                add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
                        be32_to_cpu(fmec->ec), 1);
        }

        /* read EC values from erase list */
        for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
                fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fmec);
                if (fm_pos >= fm_size)
                        goto fail_bad;

                add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
                        be32_to_cpu(fmec->ec), 1);
        }

        ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
        ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

        /* Iterate over all volumes and read their EBA table */
        for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
                fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fmvhdr);
                if (fm_pos >= fm_size)
                        goto fail_bad;

                if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
                        ubi_err("bad fastmap vol header magic: 0x%x, " \
                                "expected: 0x%x",
                                be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
                        goto fail_bad;
                }

                av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
                             be32_to_cpu(fmvhdr->used_ebs),
                             be32_to_cpu(fmvhdr->data_pad),
                             fmvhdr->vol_type,
                             be32_to_cpu(fmvhdr->last_eb_bytes));
                if (!av)
                        goto fail_bad;

                ai->vols_found++;
                if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
                        ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

                fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fm_eba);
                fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
                if (fm_pos >= fm_size)
                        goto fail_bad;

                if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
                        ubi_err("bad fastmap EBA header magic: 0x%x, " \
                                "expected: 0x%x",
                                be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
                        goto fail_bad;
                }

                for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
                        int pnum = be32_to_cpu(fm_eba->pnum[j]);

                        if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
                                continue;

                        aeb = NULL;
                        list_for_each_entry(tmp_aeb, &used, u.list) {
                                if (tmp_aeb->pnum == pnum) {
                                        aeb = tmp_aeb;
                                        break;
                                }
                        }

                        /* This can happen if a PEB is already in an EBA known
                         * by this fastmap but the PEB itself is not in the used
                         * list.
                         * In this case the PEB can be within the fastmap pool
                         * or while writing the fastmap it was in the protection
                         * queue.
                         */
                        if (!aeb) {
                                aeb = kmem_cache_alloc(ai->aeb_slab_cache,
                                                       GFP_KERNEL);
                                if (!aeb) {
                                        ret = -ENOMEM;
                                        goto fail;
                                }

                                aeb->lnum = j;
                                aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
                                aeb->ec = -1;
                                aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
                                list_add_tail(&aeb->u.list, &eba_orphans);
                                continue;
                        }

                        aeb->lnum = j;

                        if (av->highest_lnum <= aeb->lnum)
                                av->highest_lnum = aeb->lnum;

                        assign_aeb_to_av(ai, aeb, av);

                        dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
                                aeb->pnum, aeb->lnum, av->vol_id);
                }

                ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
                if (!ech) {
                        ret = -ENOMEM;
                        goto fail;
                }

                list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
                                         u.list) {
                        int err;

                        if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
                                ubi_err("bad PEB in fastmap EBA orphan list");
                                ret = UBI_BAD_FASTMAP;
                                kfree(ech);
                                goto fail;
                        }

                        err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
                        if (err && err != UBI_IO_BITFLIPS) {
                                ubi_err("unable to read EC header! PEB:%i " \
                                        "err:%i", tmp_aeb->pnum, err);
                                ret = err > 0 ? UBI_BAD_FASTMAP : err;
                                kfree(ech);
                                goto fail;
                        } else if (err == UBI_IO_BITFLIPS)
                                tmp_aeb->scrub = 1;

                        tmp_aeb->ec = be64_to_cpu(ech->ec);
                        assign_aeb_to_av(ai, tmp_aeb, av);
                }

                kfree(ech);
        }

        ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
                        &eba_orphans, &free);
        if (ret)
                goto fail;

        ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
                        &eba_orphans, &free);
        if (ret)
                goto fail;

        if (max_sqnum > ai->max_sqnum)
                ai->max_sqnum = max_sqnum;

        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
                list_move_tail(&tmp_aeb->u.list, &ai->free);

        ubi_assert(list_empty(&used));
        ubi_assert(list_empty(&eba_orphans));
        ubi_assert(list_empty(&free));

        /*
         * If fastmap is leaking PEBs (must not happen), raise a
         * fat warning and fall back to scanning mode.
         * We do this here because in ubi_wl_init() it's too late
         * and we cannot fall back to scanning.
         */
        if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
                    ai->bad_peb_count - fm->used_blocks))
                goto fail_bad;

        return 0;

fail_bad:
        ret = UBI_BAD_FASTMAP;
fail:
        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
                kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
                list_del(&tmp_aeb->u.list);
        }
        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
                kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
                list_del(&tmp_aeb->u.list);
        }
        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
                kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
                list_del(&tmp_aeb->u.list);
        }

        return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
                     int fm_anchor)
{
        struct ubi_fm_sb *fmsb, *fmsb2;
        struct ubi_vid_hdr *vh;
        struct ubi_ec_hdr *ech;
        struct ubi_fastmap_layout *fm;
        int i, used_blocks, pnum, ret = 0;
        size_t fm_size;
        __be32 crc, tmp_crc;
        unsigned long long sqnum = 0;

        mutex_lock(&ubi->fm_mutex);
        memset(ubi->fm_buf, 0, ubi->fm_size);

        fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
        if (!fmsb) {
                ret = -ENOMEM;
                goto out;
        }

        fm = kzalloc(sizeof(*fm), GFP_KERNEL);
        if (!fm) {
                ret = -ENOMEM;
                kfree(fmsb);
                goto out;
        }

        ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
        if (ret && ret != UBI_IO_BITFLIPS)
                goto free_fm_sb;
        else if (ret == UBI_IO_BITFLIPS)
                fm->to_be_tortured[0] = 1;

        if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
                ubi_err("bad super block magic: 0x%x, expected: 0x%x",
                        be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
                ret = UBI_BAD_FASTMAP;
                goto free_fm_sb;
        }

        if (fmsb->version != UBI_FM_FMT_VERSION) {
                ubi_err("bad fastmap version: %i, expected: %i",
                        fmsb->version, UBI_FM_FMT_VERSION);
                ret = UBI_BAD_FASTMAP;
                goto free_fm_sb;
        }

        used_blocks = be32_to_cpu(fmsb->used_blocks);
        if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
                ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
                ret = UBI_BAD_FASTMAP;
                goto free_fm_sb;
        }

        fm_size = ubi->leb_size * used_blocks;
        if (fm_size != ubi->fm_size) {
                ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
                        ubi->fm_size);
                ret = UBI_BAD_FASTMAP;
                goto free_fm_sb;
        }

        ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
        if (!ech) {
                ret = -ENOMEM;
                goto free_fm_sb;
        }

        vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
        if (!vh) {
                ret = -ENOMEM;
                goto free_hdr;
        }

        for (i = 0; i < used_blocks; i++) {
                int image_seq;

                pnum = be32_to_cpu(fmsb->block_loc[i]);

                if (ubi_io_is_bad(ubi, pnum)) {
                        ret = UBI_BAD_FASTMAP;
                        goto free_hdr;
                }

                ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
                if (ret && ret != UBI_IO_BITFLIPS) {
                        ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
                                i, pnum);
                        if (ret > 0)
                                ret = UBI_BAD_FASTMAP;
                        goto free_hdr;
                } else if (ret == UBI_IO_BITFLIPS)
                        fm->to_be_tortured[i] = 1;

                image_seq = be32_to_cpu(ech->image_seq);
                if (!ubi->image_seq)
                        ubi->image_seq = image_seq;

                /*
                 * Older UBI implementations have image_seq set to zero, so
                 * we shouldn't fail if image_seq == 0.
                 */
                if (image_seq && (image_seq != ubi->image_seq)) {
                        ubi_err("wrong image seq:%d instead of %d",
                                be32_to_cpu(ech->image_seq), ubi->image_seq);
                        ret = UBI_BAD_FASTMAP;
                        goto free_hdr;
                }

                ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
                if (ret && ret != UBI_IO_BITFLIPS) {
                        ubi_err("unable to read fastmap block# %i (PEB: %i)",
                                i, pnum);
                        goto free_hdr;
                }

                if (i == 0) {
                        if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
                                ubi_err("bad fastmap anchor vol_id: 0x%x," \
                                        " expected: 0x%x",
                                        be32_to_cpu(vh->vol_id),
                                        UBI_FM_SB_VOLUME_ID);
                                ret = UBI_BAD_FASTMAP;
                                goto free_hdr;
                        }
                } else {
                        if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
                                ubi_err("bad fastmap data vol_id: 0x%x," \
                                        " expected: 0x%x",
                                        be32_to_cpu(vh->vol_id),
                                        UBI_FM_DATA_VOLUME_ID);
                                ret = UBI_BAD_FASTMAP;
                                goto free_hdr;
                        }
                }

                if (sqnum < be64_to_cpu(vh->sqnum))
                        sqnum = be64_to_cpu(vh->sqnum);

                ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
                                  ubi->leb_start, ubi->leb_size);
                if (ret && ret != UBI_IO_BITFLIPS) {
                        ubi_err("unable to read fastmap block# %i (PEB: %i, " \
                                "err: %i)", i, pnum, ret);
                        goto free_hdr;
                }
        }

        kfree(fmsb);
        fmsb = NULL;

        fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
        tmp_crc = be32_to_cpu(fmsb2->data_crc);
        fmsb2->data_crc = 0;
        crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
        if (crc != tmp_crc) {
                ubi_err("fastmap data CRC is invalid");
                ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
                ret = UBI_BAD_FASTMAP;
                goto free_hdr;
        }

        fmsb2->sqnum = sqnum;

        fm->used_blocks = used_blocks;

        ret = ubi_attach_fastmap(ubi, ai, fm);
        if (ret) {
                if (ret > 0)
                        ret = UBI_BAD_FASTMAP;
                goto free_hdr;
        }

        for (i = 0; i < used_blocks; i++) {
                struct ubi_wl_entry *e;

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e) {
                        while (i--)
                                kfree(fm->e[i]);

                        ret = -ENOMEM;
                        goto free_hdr;
                }

                e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
                e->ec = be32_to_cpu(fmsb2->block_ec[i]);
                fm->e[i] = e;
        }

        ubi->fm = fm;
        ubi->fm_pool.max_size = ubi->fm->max_pool_size;
        ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
        ubi_msg("attached by fastmap");
        ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
        ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
        ubi->fm_disabled = 0;

        ubi_free_vid_hdr(ubi, vh);
        kfree(ech);
out:
        mutex_unlock(&ubi->fm_mutex);
        if (ret == UBI_BAD_FASTMAP)
                ubi_err("Attach by fastmap failed, doing a full scan!");
        return ret;

free_hdr:
        ubi_free_vid_hdr(ubi, vh);
        kfree(ech);
free_fm_sb:
        kfree(fmsb);
        kfree(fm);
        goto out;
}
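
/*
 * Usage note (the caller side is assumed here, it is not part of this file):
 * the attach code is expected to pass the anchor PEB it found during the
 * initial scan and to fall back to a full scan whenever UBI_NO_FASTMAP or
 * UBI_BAD_FASTMAP is returned.
 */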

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
                             struct ubi_fastmap_layout *new_fm)
{
        size_t fm_pos = 0;
        void *fm_raw;
        struct ubi_fm_sb *fmsb;
        struct ubi_fm_hdr *fmh;
        struct ubi_fm_scan_pool *fmpl1, *fmpl2;
        struct ubi_fm_ec *fec;
        struct ubi_fm_volhdr *fvh;
        struct ubi_fm_eba *feba;
        struct rb_node *node;
        struct ubi_wl_entry *wl_e;
        struct ubi_volume *vol;
        struct ubi_vid_hdr *avhdr, *dvhdr;
        struct ubi_work *ubi_wrk;
        int ret, i, j, free_peb_count, used_peb_count, vol_count;
        int scrub_peb_count, erase_peb_count;

        fm_raw = ubi->fm_buf;
        memset(ubi->fm_buf, 0, ubi->fm_size);

        avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
        if (!avhdr) {
                ret = -ENOMEM;
                goto out;
        }

        dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
        if (!dvhdr) {
                ret = -ENOMEM;
                goto out_kfree;
        }

        spin_lock(&ubi->volumes_lock);
        spin_lock(&ubi->wl_lock);

        fmsb = (struct ubi_fm_sb *)fm_raw;
        fm_pos += sizeof(*fmsb);
        ubi_assert(fm_pos <= ubi->fm_size);

        fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmh);
        ubi_assert(fm_pos <= ubi->fm_size);

        fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
        fmsb->version = UBI_FM_FMT_VERSION;
        fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
        /* the max sqnum will be filled in while *reading* the fastmap */
        fmsb->sqnum = 0;

        fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
        free_peb_count = 0;
        used_peb_count = 0;
        scrub_peb_count = 0;
        erase_peb_count = 0;
        vol_count = 0;

        fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmpl1);
        fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
        fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
        fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

        for (i = 0; i < ubi->fm_pool.size; i++)
                fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

        fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
        fm_pos += sizeof(*fmpl2);
        fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
        fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
        fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

        for (i = 0; i < ubi->fm_wl_pool.size; i++)
                fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

        for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
                wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
                fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

                fec->pnum = cpu_to_be32(wl_e->pnum);
                fec->ec = cpu_to_be32(wl_e->ec);

                free_peb_count++;
                fm_pos += sizeof(*fec);
                ubi_assert(fm_pos <= ubi->fm_size);
        }
        fmh->free_peb_count = cpu_to_be32(free_peb_count);

        for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
                wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
                fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

                fec->pnum = cpu_to_be32(wl_e->pnum);
                fec->ec = cpu_to_be32(wl_e->ec);

                used_peb_count++;
                fm_pos += sizeof(*fec);
                ubi_assert(fm_pos <= ubi->fm_size);
        }
        fmh->used_peb_count = cpu_to_be32(used_peb_count);

        for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
                wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
                fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

                fec->pnum = cpu_to_be32(wl_e->pnum);
                fec->ec = cpu_to_be32(wl_e->ec);

                scrub_peb_count++;
                fm_pos += sizeof(*fec);
                ubi_assert(fm_pos <= ubi->fm_size);
        }
        fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

        list_for_each_entry(ubi_wrk, &ubi->works, list) {
                if (ubi_is_erase_work(ubi_wrk)) {
                        wl_e = ubi_wrk->e;
                        ubi_assert(wl_e);

                        fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

                        fec->pnum = cpu_to_be32(wl_e->pnum);
                        fec->ec = cpu_to_be32(wl_e->ec);

                        erase_peb_count++;
                        fm_pos += sizeof(*fec);
                        ubi_assert(fm_pos <= ubi->fm_size);
                }
        }
        fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

        for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
                vol = ubi->volumes[i];

                if (!vol)
                        continue;

                vol_count++;

                fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
                fm_pos += sizeof(*fvh);
                ubi_assert(fm_pos <= ubi->fm_size);

                fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
                fvh->vol_id = cpu_to_be32(vol->vol_id);
                fvh->vol_type = vol->vol_type;
                fvh->used_ebs = cpu_to_be32(vol->used_ebs);
                fvh->data_pad = cpu_to_be32(vol->data_pad);
                fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

                ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
                        vol->vol_type == UBI_STATIC_VOLUME);

                feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
                fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
                ubi_assert(fm_pos <= ubi->fm_size);

                for (j = 0; j < vol->reserved_pebs; j++)
                        feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

                feba->reserved_pebs = cpu_to_be32(j);
                feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
        }
        fmh->vol_count = cpu_to_be32(vol_count);
        fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

        avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        avhdr->lnum = 0;

        spin_unlock(&ubi->wl_lock);
        spin_unlock(&ubi->volumes_lock);

        dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
        ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
        if (ret) {
                ubi_err("unable to write vid_hdr to fastmap SB!");
                goto out_kfree;
        }

        for (i = 0; i < new_fm->used_blocks; i++) {
                fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
                fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
        }

        fmsb->data_crc = 0;
        fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
                                           ubi->fm_size));

        for (i = 1; i < new_fm->used_blocks; i++) {
                dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
                dvhdr->lnum = cpu_to_be32(i);
                dbg_bld("writing fastmap data to PEB %i sqnum %llu",
                        new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
                ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
                if (ret) {
                        ubi_err("unable to write vid_hdr to PEB %i!",
                                new_fm->e[i]->pnum);
                        goto out_kfree;
                }
        }

        for (i = 0; i < new_fm->used_blocks; i++) {
                ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
                        new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
                if (ret) {
                        ubi_err("unable to write fastmap to PEB %i!",
                                new_fm->e[i]->pnum);
                        goto out_kfree;
                }
        }

        ubi_assert(new_fm);
        ubi->fm = new_fm;

        dbg_bld("fastmap written!");

out_kfree:
        ubi_free_vid_hdr(ubi, avhdr);
        ubi_free_vid_hdr(ubi, dvhdr);
out:
        return ret;
}
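
/*
 * Note on the write order used above: the anchor PEB's VID header is written
 * first, then the VID headers of the remaining fastmap blocks (each stamped
 * with a fresh sequence number from ubi_next_sqnum()), and only afterwards
 * the fastmap payload itself, which is protected by the CRC stored in the
 * fastmap super block.
 */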

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
        int ret;
        struct ubi_ec_hdr *ec_hdr;
        long long ec;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
        if (!ec_hdr)
                return -ENOMEM;

        ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
        if (ret < 0)
                goto out;
        else if (ret && ret != UBI_IO_BITFLIPS) {
                ret = -EINVAL;
                goto out;
        }

        ret = ubi_io_sync_erase(ubi, pnum, 0);
        if (ret < 0)
                goto out;

        ec = be64_to_cpu(ec_hdr->ec);
        ec += ret;
        if (ec > UBI_MAX_ERASECOUNTER) {
                ret = -EINVAL;
                goto out;
        }

        ec_hdr->ec = cpu_to_be64(ec);
        ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
        if (ret < 0)
                goto out;

        ret = ec;
out:
        kfree(ec_hdr);
        return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
                              struct ubi_fastmap_layout *fm)
{
        int ret;
        struct ubi_vid_hdr *vh;

        ret = erase_block(ubi, fm->e[0]->pnum);
        if (ret < 0)
                return ret;

        vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
        if (!vh)
                return -ENOMEM;

        /* deleting the current fastmap SB is not enough, an old SB may exist,
         * so create a (corrupted) SB such that fastmap will find it and fall
         * back to scanning mode in any case */
        vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);

        return ret;
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
        int ret, i, j;
        struct ubi_fastmap_layout *new_fm, *old_fm;
        struct ubi_wl_entry *tmp_e;

        mutex_lock(&ubi->fm_mutex);

        ubi_refill_pools(ubi);

        if (ubi->ro_mode || ubi->fm_disabled) {
                mutex_unlock(&ubi->fm_mutex);
                return 0;
        }

        ret = ubi_ensure_anchor_pebs(ubi);
        if (ret) {
                mutex_unlock(&ubi->fm_mutex);
                return ret;
        }

        new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
        if (!new_fm) {
                mutex_unlock(&ubi->fm_mutex);
                return -ENOMEM;
        }

        new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

        for (i = 0; i < new_fm->used_blocks; i++) {
                new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!new_fm->e[i]) {
                        while (i--)
                                kfree(new_fm->e[i]);

                        kfree(new_fm);
                        mutex_unlock(&ubi->fm_mutex);
                        return -ENOMEM;
                }
        }

        old_fm = ubi->fm;
        ubi->fm = NULL;

        if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
                ubi_err("fastmap too large");
                ret = -ENOSPC;
                goto err;
        }

        for (i = 1; i < new_fm->used_blocks; i++) {
                spin_lock(&ubi->wl_lock);
                tmp_e = ubi_wl_get_fm_peb(ubi, 0);
                spin_unlock(&ubi->wl_lock);

                if (!tmp_e && !old_fm) {
                        ubi_err("could not get any free erase block");

                        for (j = 1; j < i; j++)
                                ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

                        ret = -ENOSPC;
                        goto err;
                } else if (!tmp_e && old_fm) {
                        ret = erase_block(ubi, old_fm->e[i]->pnum);
                        if (ret < 0) {
                                for (j = 1; j < i; j++)
                                        ubi_wl_put_fm_peb(ubi, new_fm->e[j],
                                                          j, 0);

                                ubi_err("could not erase old fastmap PEB");
                                goto err;
                        }

                        new_fm->e[i]->pnum = old_fm->e[i]->pnum;
                        new_fm->e[i]->ec = old_fm->e[i]->ec;
                } else {
                        new_fm->e[i]->pnum = tmp_e->pnum;
                        new_fm->e[i]->ec = tmp_e->ec;

                        if (old_fm)
                                ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
                                                  old_fm->to_be_tortured[i]);
                }
        }

        spin_lock(&ubi->wl_lock);
        tmp_e = ubi_wl_get_fm_peb(ubi, 1);
        spin_unlock(&ubi->wl_lock);

        if (old_fm) {
                /* no fresh anchor PEB was found, reuse the old one */
                if (!tmp_e) {
                        ret = erase_block(ubi, old_fm->e[0]->pnum);
                        if (ret < 0) {
                                ubi_err("could not erase old anchor PEB");

                                for (i = 1; i < new_fm->used_blocks; i++)
                                        ubi_wl_put_fm_peb(ubi, new_fm->e[i],
                                                          i, 0);
                                goto err;
                        }

                        new_fm->e[0]->pnum = old_fm->e[0]->pnum;
                        new_fm->e[0]->ec = ret;
                } else {
                        /* we've got a new anchor PEB, return the old one */
                        ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
                                          old_fm->to_be_tortured[0]);

                        new_fm->e[0]->pnum = tmp_e->pnum;
                        new_fm->e[0]->ec = tmp_e->ec;
                }
        } else {
                if (!tmp_e) {
                        ubi_err("could not find any anchor PEB");

                        for (i = 1; i < new_fm->used_blocks; i++)
                                ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

                        ret = -ENOSPC;
                        goto err;
                }

                new_fm->e[0]->pnum = tmp_e->pnum;
                new_fm->e[0]->ec = tmp_e->ec;
        }

        down_write(&ubi->work_sem);
        down_write(&ubi->fm_sem);
        ret = ubi_write_fastmap(ubi, new_fm);
        up_write(&ubi->fm_sem);
        up_write(&ubi->work_sem);

        if (ret)
                goto err;

out_unlock:
        mutex_unlock(&ubi->fm_mutex);
        kfree(old_fm);
        return ret;

err:
        kfree(new_fm);

        ubi_warn("Unable to write new fastmap, err=%i", ret);

        ret = invalidate_fastmap(ubi, old_fm);
        if (ret < 0)
                ubi_err("Unable to invalidate current fastmap!");