/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"
/**
 * init_seen - allocate memory used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}
/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}
/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum,
			    unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}
/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		/* Complain about PEBs UBI knows about but fastmap missed */
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}
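
/*
 * Illustrative sketch, not driver logic: ubi_write_fastmap() below uses
 * this debug machinery roughly as follows (only active when the fastmap
 * self-checks are enabled via ubi_dbg_chk_fastmap()):
 *
 *	unsigned long *seen = init_seen(ubi);	// one bit per PEB
 *	...
 *	set_seen(ubi, pnum, seen);		// for each PEB the fastmap records
 *	...
 *	ret = self_check_seen(ubi, seen);	// known-but-unseen PEB => -EINVAL
 *	free_seen(seen);
 */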
/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
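
/*
 * Worked example (hypothetical geometry, for illustration only): for a
 * device with peb_count = 1024, the per-PEB terms above amount to
 * 1024 * sizeof(struct ubi_fm_ec) bytes of erase-counter entries plus
 * 1024 * sizeof(__be32) bytes of EBA slots, i.e. on the order of 12 KiB,
 * on top of the fixed headers and UBI_MAX_VOLUMES volume headers. With a
 * typical leb_size of roughly 126 KiB, roundup() pads the total to one
 * full LEB, so such a device ends up with a single-block fastmap.
 */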
/**
 * new_fm_vbuf - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_io_buf on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}
/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}
/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}
/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
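
/*
 * Note on the "cmp_res & 1" test above (informational): ubi_compare_lebs()
 * encodes its verdict as a small bitmask; bit 0 set is understood here as
 * "the second PEB (new_aeb) holds the newer copy", which selects the
 * in-place update path, while a clear bit 0 means the existing tree entry
 * wins and new_aeb is queued for erasure.
 */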
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}
/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			add_aeb(ai, free, pnum, ec, scrub);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}
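
/*
 * Summary of the per-PEB decision in scan_pool() above (informational):
 *
 *	VID header read result		action
 *	------------------------------	-------------------------------------
 *	UBI_IO_FF / UBI_IO_FF_BITFLIPS	PEB is empty: unmap it, add to @free
 *	0 / UBI_IO_BITFLIPS		PEB carries data: process_pool_aeb()
 *	anything else			pool is damaged: UBI_BAD_FASTMAP,
 *					fall back to a full scan
 */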
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}
/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EINVAL)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}
/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *tmp_aeb, *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	u32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Move all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
		list_move_tail(&aeb->u.list, &ai->fastmap);

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
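
/*
 * On-flash layout handled by ubi_scan_fastmap() above (informational):
 * block 0 is the anchor PEB and must carry UBI_FM_SB_VOLUME_ID; blocks
 * 1..used_blocks-1 carry UBI_FM_DATA_VOLUME_ID. All blocks are read
 * back-to-back into ubi->fm_buf and the whole buffer must match
 * fmsb2->data_crc before the fastmap is attached.
 */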
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_buf(avbuf);
	ubi_free_vid_buf(dvbuf);
	free_seen(seen_pebs);
out:
	return ret;
}
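
/*
 * Write ordering note for ubi_write_fastmap() above (informational): the
 * anchor VID header is written first, then the VID headers of the data
 * blocks, and only then the fastmap payload itself. fmsb->sqnum is left
 * zero on flash; the authoritative sequence number is recovered from the
 * VID headers while *reading* the fastmap, as noted above.
 */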
/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}
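
/*
 * Usage note (informational): on success erase_block() returns the PEB's
 * incremented erase counter, not 0. ubi_update_fastmap() below relies on
 * this when it reuses the old anchor PEB and refreshes new_fm->e[0]->ec
 * with the returned value.
 */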
/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}