/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) unit.
 *
 * This unit is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which assumes poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA unit implements per-logical eraseblock locking. Before accessing a
 * logical eraseblock it is locked for reading or writing. The per-logical
 * eraseblock locking is implemented by means of the lock tree. The lock tree
 * is an RB-tree which refers all the currently locked logical eraseblocks. The
 * lock tree elements are &struct ltree_entry objects. They are indexed by
 * (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we
 * assume 64 bits is enough for it to never overflow.
 */
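
/*
 * Illustrative sketch, not driver logic: conceptually, the lock tree compares
 * entries first by volume ID and then by LEB number, i.e.
 *
 *	if (vol_id != le->vol_id)
 *		descend left or right by vol_id;
 *	else
 *		descend left or right by lnum;
 *
 * See ltree_lookup() and ltree_add_entry() below for the actual traversal and
 * insertion code.
 */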

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * struct ltree_entry - an entry in the lock tree.
 * @rb: links RB-tree nodes
 * @vol_id: volume ID of the locked logical eraseblock
 * @lnum: locked logical eraseblock number
 * @users: how many tasks are using this logical eraseblock or wait for it
 * @mutex: read/write mutex to implement read/write access serialization to
 *         the (@vol_id, @lnum) logical eraseblock
 *
 * When a logical eraseblock is being locked, the corresponding
 * &struct ltree_entry object is inserted into the lock tree (@ubi->ltree).
 */
struct ltree_entry {
	struct rb_node rb;
	int vol_id;
	int lnum;
	int users;
	struct rw_semaphore mutex;
};

/* Slab cache for lock-tree entries */
static struct kmem_cache *ltree_slab;

/**
 * next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
static unsigned long long next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}
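
/*
 * Note: every mapping operation below stamps the VID header it writes with a
 * fresh value from this counter, e.g.
 *
 *	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 *
 * which is what makes each VID header's sequence number unique.
 */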

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOL_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ltree_entry *le;

		le = rb_entry(p, struct ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else if (lnum < le->lnum)
			p = p->rb_left;
		else if (lnum > le->lnum)
			p = p->rb_right;
		else
			return le;
	}

	return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM (as an
 * error pointer) if memory allocation failed.
 */
static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
					   int lnum)
{
	struct ltree_entry *le, *le1, *le_free;

	le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	if (le_free)
		kmem_cache_free(ltree_slab, le_free);

	return le;
}
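
/*
 * Usage note (illustrative): the returned pointer must be checked with
 * IS_ERR(), because allocation failures are reported via ERR_PTR(), e.g.
 *
 *	le = ltree_add_entry(ubi, vol_id, lnum);
 *	if (IS_ERR(le))
 *		return PTR_ERR(le);
 *
 * which is the pattern leb_read_lock() and leb_write_lock() below follow.
 */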

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int free = 0;
	struct ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		free = 1;
	}
	spin_unlock(&ubi->ltree_lock);

	up_read(&le->mutex);
	if (free)
		kmem_cache_free(ltree_slab, le);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int free = 0;
	struct ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		free = 1;
	}
	spin_unlock(&ubi->ltree_lock);

	up_write(&le->mutex);
	if (free)
		kmem_cache_free(ltree_slab, le);
}
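
/*
 * Illustrative pairing of the locking helpers above (this is how the EBA
 * entry points below use them, not an additional interface):
 *
 *	err = leb_read_lock(ubi, vol_id, lnum);    (or leb_write_lock())
 *	if (err)
 *		return err;
 *	... look up vol->eba_tbl[lnum] and do the I/O ...
 *	leb_read_unlock(ubi, vol_id, lnum);        (or leb_write_unlock())
 */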

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
{
	int idx = vol_id2idx(ubi, vol_id), err, pnum;
	struct ubi_volume *vol = ubi->volumes[idx];

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	err = ubi_wl_put_peb(ubi, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
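
/*
 * Hypothetical caller sketch (the real callers live in the upper UBI layers,
 * not in this file): un-mapping a LEB is a single call,
 *
 *	err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
 *
 * after which reads of that LEB of a dynamic volume return 0xFF bytes until
 * it is mapped again by a write.
 */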

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if the data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
		     int offset, int len, int check)
{
	int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
	struct ubi_vid_hdr *vid_hdr;
	struct ubi_volume *vol = ubi->volumes[idx];
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_VID_HDR) {
					ubi_warn("bad VID header at PEB %d, LEB"
						 " %d:%d", pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
					err = -EINVAL;
					ubi_ro_mode(ubi);
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS) {
			scrub = 1;
			err = 0;
		} else if (err == -EBADMSG) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg("force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn("CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
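
/*
 * Hypothetical caller sketch: an upper layer reading one LEB of a static
 * volume with CRC checking enabled might do
 *
 *	err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, 0, len, 1);
 *	if (err == -EBADMSG)
 *		... the data is corrupted ...
 *
 * For dynamic volumes the @check argument has no effect (it is forced to 0
 * above).
 */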

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns zero in case of success, and a negative error code in case of
 * failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->buf_mutex);

retry:
	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	if (new_pnum < 0) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return new_pnum;
	}

	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err)
		goto write_error;

	data_size = offset + len;
	memset(ubi->peb_buf1 + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_put;
	}

	memcpy(ubi->peb_buf1 + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
	if (err)
		goto write_error;

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	vol->eba_tbl[lnum] = new_pnum;
	ubi_wl_put_peb(ubi, pnum, 1);

	ubi_msg("data was successfully recovered");
	return 0;

out_put:
	mutex_unlock(&ubi->buf_mutex);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn("failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg("try again");
	goto retry;
}
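
/*
 * Note: the bounded retry pattern used above - put the freshly allocated PEB
 * back and try another one until ++tries exceeds UBI_IO_RETRIES - is the same
 * pattern the write paths below use in their write_error handling.
 */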

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol_id. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
		      const void *buf, int offset, int len, int dtype)
{
	int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write %d bytes at offset %d of "
				 "LEB %d:%d, PEB %d", len, offset, vol_id,
				 lnum, pnum);
			goto write_error;
		}
	}

	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol_id. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer has to contain the
 * alignment padding. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
			 const void *buf, int len, int dtype, int used_ebs)
{
	int err, pnum, tries = 0, data_size = len;
	int idx = vol_id2idx(ubi, vol_id);
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(len % ubi->min_io_size == 0);

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
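
/*
 * Worked example for the last-LEB alignment above (illustrative numbers): with
 * a minimal I/O unit of 512 bytes and 700 bytes of real data in the last LEB,
 * the caller passes @len = 700; the function records data_size = 700 in the
 * VID header but writes len = ALIGN(700, 512) = 1024 bytes, so @buf must be at
 * least 1024 bytes long and contain the padding.
 */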

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len is the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
			      const void *buf, int len, int dtype)
{
	int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
		if (err)
			goto out_leb_unlock;
	}

	vol->eba_tbl[lnum] = pnum;

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}

/**
 * ltree_entry_ctor - lock tree entries slab cache constructor.
 * @cache: the lock tree entry slab cache
 * @obj: the lock-tree entry to construct
 */
static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
{
	struct ltree_entry *le = obj;

	le->users = 0;
	init_rwsem(&le->mutex);
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
 * was canceled because bit-flips were detected at the target PEB, and a
 * negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so we lock it.
	 */
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	mutex_lock(&ubi->buf_mutex);

	/*
	 * But the logical eraseblock might have been put by this time.
	 * Cancel if it is true.
	 */
	idx = vol_id2idx(ubi, vol_id);

	/*
	 * We may race with volume deletion/re-size, so we have to hold
	 * @ubi->volumes_lock.
	 */
	spin_lock(&ubi->volumes_lock);
	vol = ubi->volumes[idx];
	if (!vol) {
		dbg_eba("volume %d was removed meanwhile", vol_id);
		spin_unlock(&ubi->volumes_lock);
		goto out_unlock;
	}

	pnum = vol->eba_tbl[lnum];
	if (pnum != from) {
		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
			"PEB %d, cancel", vol_id, lnum, from, pnum);
		spin_unlock(&ubi->volumes_lock);
		goto out_unlock;
	}
	spin_unlock(&ubi->volumes_lock);

	/* OK, now the LEB is locked and we can safely start moving it */

	dbg_eba("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		goto out_unlock;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);

	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err)
		goto out_unlock;

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS)
			ubi_warn("cannot read VID header back from PEB %d", to);
		goto out_unlock;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
		if (err)
			goto out_unlock;

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS)
				ubi_warn("cannot read data back from PEB %d",
					 to);
			goto out_unlock;
		}

		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
			ubi_warn("read data back from PEB %d - it is different",
				 to);
			err = -EINVAL;
			goto out_unlock;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	vol->eba_tbl[lnum] = to;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int i, j, err, num_volumes;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;
	struct ubi_scan_leb *seb;
	struct rb_node *rb;

	dbg_eba("initialize EBA unit");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	if (ubi_devices_cnt == 0) {
		ltree_slab = kmem_cache_create("ubi_ltree_slab",
					       sizeof(struct ltree_entry), 0,
					       0, &ltree_entry_ctor);
		if (!ltree_slab)
			return -ENOMEM;
	}

	ubi->global_sqnum = si->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
		if (!sv)
			continue;

		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
			if (seb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_scan_move_to_list(sv, seb, &si->erase);
			else
				vol->eba_tbl[seb->lnum] = seb->pnum;
		}
	}

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			ubi_warn("cannot reserve enough PEBs for bad PEB "
				 "handling, reserved %d, need %d",
				 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}

	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	dbg_eba("EBA unit is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
	if (ubi_devices_cnt == 0)
		kmem_cache_destroy(ltree_slab);
	return err;
}

/**
 * ubi_eba_close - close EBA unit.
 * @ubi: UBI device description object
 */
void ubi_eba_close(const struct ubi_device *ubi)
{
	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	dbg_eba("close EBA unit");

	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
	if (ubi_devices_cnt == 1)
		kmem_cache_destroy(ltree_slab);
}