// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"
#include "dm-btree-internal.h"
#include "dm-persistent-data-internal.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"
18 /*----------------------------------------------------------------*/
23 #define INDEX_CSUM_XOR 160478
25 static void index_prepare_for_write(const struct dm_block_validator
*v
,
29 struct disk_metadata_index
*mi_le
= dm_block_data(b
);
31 mi_le
->blocknr
= cpu_to_le64(dm_block_location(b
));
32 mi_le
->csum
= cpu_to_le32(dm_bm_checksum(&mi_le
->padding
,
33 block_size
- sizeof(__le32
),
37 static int index_check(const struct dm_block_validator
*v
,
41 struct disk_metadata_index
*mi_le
= dm_block_data(b
);
44 if (dm_block_location(b
) != le64_to_cpu(mi_le
->blocknr
)) {
45 DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__
,
46 le64_to_cpu(mi_le
->blocknr
), dm_block_location(b
));
50 csum_disk
= cpu_to_le32(dm_bm_checksum(&mi_le
->padding
,
51 block_size
- sizeof(__le32
),
53 if (csum_disk
!= mi_le
->csum
) {
54 DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__
,
55 le32_to_cpu(csum_disk
), le32_to_cpu(mi_le
->csum
));
62 static const struct dm_block_validator index_validator
= {
64 .prepare_for_write
= index_prepare_for_write
,
68 /*----------------------------------------------------------------*/
73 #define BITMAP_CSUM_XOR 240779
75 static void dm_bitmap_prepare_for_write(const struct dm_block_validator
*v
,
79 struct disk_bitmap_header
*disk_header
= dm_block_data(b
);
81 disk_header
->blocknr
= cpu_to_le64(dm_block_location(b
));
82 disk_header
->csum
= cpu_to_le32(dm_bm_checksum(&disk_header
->not_used
,
83 block_size
- sizeof(__le32
),
87 static int dm_bitmap_check(const struct dm_block_validator
*v
,
91 struct disk_bitmap_header
*disk_header
= dm_block_data(b
);
94 if (dm_block_location(b
) != le64_to_cpu(disk_header
->blocknr
)) {
95 DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
96 le64_to_cpu(disk_header
->blocknr
), dm_block_location(b
));
100 csum_disk
= cpu_to_le32(dm_bm_checksum(&disk_header
->not_used
,
101 block_size
- sizeof(__le32
),
103 if (csum_disk
!= disk_header
->csum
) {
104 DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
105 le32_to_cpu(csum_disk
), le32_to_cpu(disk_header
->csum
));
112 static const struct dm_block_validator dm_sm_bitmap_validator
= {
114 .prepare_for_write
= dm_bitmap_prepare_for_write
,
115 .check
= dm_bitmap_check
,
118 /*----------------------------------------------------------------*/
120 #define ENTRIES_PER_WORD 32
121 #define ENTRIES_SHIFT 5
123 static void *dm_bitmap_data(struct dm_block
*b
)
125 return dm_block_data(b
) + sizeof(struct disk_bitmap_header
);
128 #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
130 static unsigned int dm_bitmap_word_used(void *addr
, unsigned int b
)
132 __le64
*words_le
= addr
;
133 __le64
*w_le
= words_le
+ (b
>> ENTRIES_SHIFT
);
135 uint64_t bits
= le64_to_cpu(*w_le
);
136 uint64_t mask
= (bits
+ WORD_MASK_HIGH
+ 1) & WORD_MASK_HIGH
;
138 return !(~bits
& mask
);
141 static unsigned int sm_lookup_bitmap(void *addr
, unsigned int b
)
143 __le64
*words_le
= addr
;
144 __le64
*w_le
= words_le
+ (b
>> ENTRIES_SHIFT
);
147 b
= (b
& (ENTRIES_PER_WORD
- 1)) << 1;
148 hi
= !!test_bit_le(b
, (void *) w_le
);
149 lo
= !!test_bit_le(b
+ 1, (void *) w_le
);
150 return (hi
<< 1) | lo
;
153 static void sm_set_bitmap(void *addr
, unsigned int b
, unsigned int val
)
155 __le64
*words_le
= addr
;
156 __le64
*w_le
= words_le
+ (b
>> ENTRIES_SHIFT
);
158 b
= (b
& (ENTRIES_PER_WORD
- 1)) << 1;
161 __set_bit_le(b
, (void *) w_le
);
163 __clear_bit_le(b
, (void *) w_le
);
166 __set_bit_le(b
+ 1, (void *) w_le
);
168 __clear_bit_le(b
+ 1, (void *) w_le
);
171 static int sm_find_free(void *addr
, unsigned int begin
, unsigned int end
,
172 unsigned int *result
)
174 while (begin
< end
) {
175 if (!(begin
& (ENTRIES_PER_WORD
- 1)) &&
176 dm_bitmap_word_used(addr
, begin
)) {
177 begin
+= ENTRIES_PER_WORD
;
181 if (!sm_lookup_bitmap(addr
, begin
)) {
192 /*----------------------------------------------------------------*/
194 static int sm_ll_init(struct ll_disk
*ll
, struct dm_transaction_manager
*tm
)
196 memset(ll
, 0, sizeof(struct ll_disk
));
200 ll
->bitmap_info
.tm
= tm
;
201 ll
->bitmap_info
.levels
= 1;
204 * Because the new bitmap blocks are created via a shadow
205 * operation, the old entry has already had its reference count
206 * decremented and we don't need the btree to do any bookkeeping.
208 ll
->bitmap_info
.value_type
.size
= sizeof(struct disk_index_entry
);
209 ll
->bitmap_info
.value_type
.inc
= NULL
;
210 ll
->bitmap_info
.value_type
.dec
= NULL
;
211 ll
->bitmap_info
.value_type
.equal
= NULL
;
213 ll
->ref_count_info
.tm
= tm
;
214 ll
->ref_count_info
.levels
= 1;
215 ll
->ref_count_info
.value_type
.size
= sizeof(uint32_t);
216 ll
->ref_count_info
.value_type
.inc
= NULL
;
217 ll
->ref_count_info
.value_type
.dec
= NULL
;
218 ll
->ref_count_info
.value_type
.equal
= NULL
;
220 ll
->block_size
= dm_bm_block_size(dm_tm_get_bm(tm
));
222 if (ll
->block_size
> (1 << 30)) {
223 DMERR("block size too big to hold bitmaps");
227 ll
->entries_per_block
= (ll
->block_size
- sizeof(struct disk_bitmap_header
)) *
231 ll
->ref_count_root
= 0;
232 ll
->bitmap_index_changed
= false;
237 int sm_ll_extend(struct ll_disk
*ll
, dm_block_t extra_blocks
)
240 dm_block_t i
, nr_blocks
, nr_indexes
;
241 unsigned int old_blocks
, blocks
;
243 nr_blocks
= ll
->nr_blocks
+ extra_blocks
;
244 old_blocks
= dm_sector_div_up(ll
->nr_blocks
, ll
->entries_per_block
);
245 blocks
= dm_sector_div_up(nr_blocks
, ll
->entries_per_block
);
247 nr_indexes
= dm_sector_div_up(nr_blocks
, ll
->entries_per_block
);
248 if (nr_indexes
> ll
->max_entries(ll
)) {
249 DMERR("space map too large");
254 * We need to set this before the dm_tm_new_block() call below.
256 ll
->nr_blocks
= nr_blocks
;
257 for (i
= old_blocks
; i
< blocks
; i
++) {
259 struct disk_index_entry idx
;
261 r
= dm_tm_new_block(ll
->tm
, &dm_sm_bitmap_validator
, &b
);
265 idx
.blocknr
= cpu_to_le64(dm_block_location(b
));
267 dm_tm_unlock(ll
->tm
, b
);
269 idx
.nr_free
= cpu_to_le32(ll
->entries_per_block
);
270 idx
.none_free_before
= 0;
272 r
= ll
->save_ie(ll
, i
, &idx
);
280 int sm_ll_lookup_bitmap(struct ll_disk
*ll
, dm_block_t b
, uint32_t *result
)
283 dm_block_t index
= b
;
284 struct disk_index_entry ie_disk
;
285 struct dm_block
*blk
;
287 if (b
>= ll
->nr_blocks
) {
288 DMERR_LIMIT("metadata block out of bounds");
292 b
= do_div(index
, ll
->entries_per_block
);
293 r
= ll
->load_ie(ll
, index
, &ie_disk
);
297 r
= dm_tm_read_lock(ll
->tm
, le64_to_cpu(ie_disk
.blocknr
),
298 &dm_sm_bitmap_validator
, &blk
);
302 *result
= sm_lookup_bitmap(dm_bitmap_data(blk
), b
);
304 dm_tm_unlock(ll
->tm
, blk
);
309 static int sm_ll_lookup_big_ref_count(struct ll_disk
*ll
, dm_block_t b
,
315 r
= dm_btree_lookup(&ll
->ref_count_info
, ll
->ref_count_root
, &b
, &le_rc
);
319 *result
= le32_to_cpu(le_rc
);
324 int sm_ll_lookup(struct ll_disk
*ll
, dm_block_t b
, uint32_t *result
)
326 int r
= sm_ll_lookup_bitmap(ll
, b
, result
);
334 return sm_ll_lookup_big_ref_count(ll
, b
, result
);
337 int sm_ll_find_free_block(struct ll_disk
*ll
, dm_block_t begin
,
338 dm_block_t end
, dm_block_t
*result
)
341 struct disk_index_entry ie_disk
;
342 dm_block_t i
, index_begin
= begin
;
343 dm_block_t index_end
= dm_sector_div_up(end
, ll
->entries_per_block
);
348 begin
= do_div(index_begin
, ll
->entries_per_block
);
349 end
= do_div(end
, ll
->entries_per_block
);
351 end
= ll
->entries_per_block
;
353 for (i
= index_begin
; i
< index_end
; i
++, begin
= 0) {
354 struct dm_block
*blk
;
355 unsigned int position
;
358 r
= ll
->load_ie(ll
, i
, &ie_disk
);
362 if (le32_to_cpu(ie_disk
.nr_free
) == 0)
365 r
= dm_tm_read_lock(ll
->tm
, le64_to_cpu(ie_disk
.blocknr
),
366 &dm_sm_bitmap_validator
, &blk
);
370 bit_end
= (i
== index_end
- 1) ? end
: ll
->entries_per_block
;
372 r
= sm_find_free(dm_bitmap_data(blk
),
373 max_t(unsigned int, begin
, le32_to_cpu(ie_disk
.none_free_before
)),
377 * This might happen because we started searching
378 * part way through the bitmap.
380 dm_tm_unlock(ll
->tm
, blk
);
384 dm_tm_unlock(ll
->tm
, blk
);
386 *result
= i
* ll
->entries_per_block
+ (dm_block_t
) position
;
393 int sm_ll_find_common_free_block(struct ll_disk
*old_ll
, struct ll_disk
*new_ll
,
394 dm_block_t begin
, dm_block_t end
, dm_block_t
*b
)
400 r
= sm_ll_find_free_block(new_ll
, begin
, new_ll
->nr_blocks
, b
);
404 /* double check this block wasn't used in the old transaction */
405 if (*b
>= old_ll
->nr_blocks
)
408 r
= sm_ll_lookup(old_ll
, *b
, &count
);
420 /*----------------------------------------------------------------*/
422 int sm_ll_insert(struct ll_disk
*ll
, dm_block_t b
,
423 uint32_t ref_count
, int32_t *nr_allocations
)
428 dm_block_t index
= b
;
429 struct disk_index_entry ie_disk
;
433 bit
= do_div(index
, ll
->entries_per_block
);
434 r
= ll
->load_ie(ll
, index
, &ie_disk
);
438 r
= dm_tm_shadow_block(ll
->tm
, le64_to_cpu(ie_disk
.blocknr
),
439 &dm_sm_bitmap_validator
, &nb
, &inc
);
441 DMERR("dm_tm_shadow_block() failed");
444 ie_disk
.blocknr
= cpu_to_le64(dm_block_location(nb
));
445 bm_le
= dm_bitmap_data(nb
);
447 old
= sm_lookup_bitmap(bm_le
, bit
);
449 r
= sm_ll_lookup_big_ref_count(ll
, b
, &old
);
451 dm_tm_unlock(ll
->tm
, nb
);
457 dm_tm_unlock(ll
->tm
, nb
);
461 if (ref_count
<= 2) {
462 sm_set_bitmap(bm_le
, bit
, ref_count
);
463 dm_tm_unlock(ll
->tm
, nb
);
466 r
= dm_btree_remove(&ll
->ref_count_info
,
468 &b
, &ll
->ref_count_root
);
474 __le32 le_rc
= cpu_to_le32(ref_count
);
476 sm_set_bitmap(bm_le
, bit
, 3);
477 dm_tm_unlock(ll
->tm
, nb
);
479 __dm_bless_for_disk(&le_rc
);
480 r
= dm_btree_insert(&ll
->ref_count_info
, ll
->ref_count_root
,
481 &b
, &le_rc
, &ll
->ref_count_root
);
483 DMERR("ref count insert failed");
488 if (ref_count
&& !old
) {
491 le32_add_cpu(&ie_disk
.nr_free
, -1);
492 if (le32_to_cpu(ie_disk
.none_free_before
) == bit
)
493 ie_disk
.none_free_before
= cpu_to_le32(bit
+ 1);
495 } else if (old
&& !ref_count
) {
496 *nr_allocations
= -1;
498 le32_add_cpu(&ie_disk
.nr_free
, 1);
499 ie_disk
.none_free_before
= cpu_to_le32(min(le32_to_cpu(ie_disk
.none_free_before
), bit
));
503 return ll
->save_ie(ll
, index
, &ie_disk
);
506 /*----------------------------------------------------------------*/
509 * Holds useful intermediate results for the range based inc and dec
513 struct disk_index_entry ie_disk
;
514 struct dm_block
*bitmap_block
;
517 struct dm_block
*overflow_leaf
;
520 static inline void init_inc_context(struct inc_context
*ic
)
522 ic
->bitmap_block
= NULL
;
524 ic
->overflow_leaf
= NULL
;
527 static inline void exit_inc_context(struct ll_disk
*ll
, struct inc_context
*ic
)
529 if (ic
->bitmap_block
)
530 dm_tm_unlock(ll
->tm
, ic
->bitmap_block
);
531 if (ic
->overflow_leaf
)
532 dm_tm_unlock(ll
->tm
, ic
->overflow_leaf
);
535 static inline void reset_inc_context(struct ll_disk
*ll
, struct inc_context
*ic
)
537 exit_inc_context(ll
, ic
);
538 init_inc_context(ic
);
542 * Confirms a btree node contains a particular key at an index.
544 static bool contains_key(struct btree_node
*n
, uint64_t key
, int index
)
547 index
< le32_to_cpu(n
->header
.nr_entries
) &&
548 le64_to_cpu(n
->keys
[index
]) == key
;
551 static int __sm_ll_inc_overflow(struct ll_disk
*ll
, dm_block_t b
, struct inc_context
*ic
)
555 struct btree_node
*n
;
560 * bitmap_block needs to be unlocked because getting the
561 * overflow_leaf may need to allocate, and thus use the space map.
563 reset_inc_context(ll
, ic
);
565 r
= btree_get_overwrite_leaf(&ll
->ref_count_info
, ll
->ref_count_root
,
566 b
, &index
, &ll
->ref_count_root
, &ic
->overflow_leaf
);
570 n
= dm_block_data(ic
->overflow_leaf
);
572 if (!contains_key(n
, b
, index
)) {
573 DMERR("overflow btree is missing an entry");
577 v_ptr
= value_ptr(n
, index
);
578 rc
= le32_to_cpu(*v_ptr
) + 1;
579 *v_ptr
= cpu_to_le32(rc
);
584 static int sm_ll_inc_overflow(struct ll_disk
*ll
, dm_block_t b
, struct inc_context
*ic
)
587 struct btree_node
*n
;
592 * Do we already have the correct overflow leaf?
594 if (ic
->overflow_leaf
) {
595 n
= dm_block_data(ic
->overflow_leaf
);
596 index
= lower_bound(n
, b
);
597 if (contains_key(n
, b
, index
)) {
598 v_ptr
= value_ptr(n
, index
);
599 rc
= le32_to_cpu(*v_ptr
) + 1;
600 *v_ptr
= cpu_to_le32(rc
);
606 return __sm_ll_inc_overflow(ll
, b
, ic
);
609 static inline int shadow_bitmap(struct ll_disk
*ll
, struct inc_context
*ic
)
613 r
= dm_tm_shadow_block(ll
->tm
, le64_to_cpu(ic
->ie_disk
.blocknr
),
614 &dm_sm_bitmap_validator
, &ic
->bitmap_block
, &inc
);
616 DMERR("dm_tm_shadow_block() failed");
619 ic
->ie_disk
.blocknr
= cpu_to_le64(dm_block_location(ic
->bitmap_block
));
620 ic
->bitmap
= dm_bitmap_data(ic
->bitmap_block
);
625 * Once shadow_bitmap has been called, which always happens at the start of inc/dec,
626 * we can reopen the bitmap with a simple write lock, rather than re calling
627 * dm_tm_shadow_block().
629 static inline int ensure_bitmap(struct ll_disk
*ll
, struct inc_context
*ic
)
631 if (!ic
->bitmap_block
) {
632 int r
= dm_bm_write_lock(dm_tm_get_bm(ll
->tm
), le64_to_cpu(ic
->ie_disk
.blocknr
),
633 &dm_sm_bitmap_validator
, &ic
->bitmap_block
);
635 DMERR("unable to re-get write lock for bitmap");
638 ic
->bitmap
= dm_bitmap_data(ic
->bitmap_block
);
645 * Loops round incrementing entries in a single bitmap.
647 static inline int sm_ll_inc_bitmap(struct ll_disk
*ll
, dm_block_t b
,
648 uint32_t bit
, uint32_t bit_end
,
649 int32_t *nr_allocations
, dm_block_t
*new_b
,
650 struct inc_context
*ic
)
656 for (; bit
!= bit_end
; bit
++, b
++) {
658 * We only need to drop the bitmap if we need to find a new btree
659 * leaf for the overflow. So if it was dropped last iteration,
662 r
= ensure_bitmap(ll
, ic
);
666 old
= sm_lookup_bitmap(ic
->bitmap
, bit
);
669 /* inc bitmap, adjust nr_allocated */
670 sm_set_bitmap(ic
->bitmap
, bit
, 1);
673 le32_add_cpu(&ic
->ie_disk
.nr_free
, -1);
674 if (le32_to_cpu(ic
->ie_disk
.none_free_before
) == bit
)
675 ic
->ie_disk
.none_free_before
= cpu_to_le32(bit
+ 1);
680 sm_set_bitmap(ic
->bitmap
, bit
, 2);
684 /* inc bitmap and insert into overflow */
685 sm_set_bitmap(ic
->bitmap
, bit
, 3);
686 reset_inc_context(ll
, ic
);
688 le_rc
= cpu_to_le32(3);
689 __dm_bless_for_disk(&le_rc
);
690 r
= dm_btree_insert(&ll
->ref_count_info
, ll
->ref_count_root
,
691 &b
, &le_rc
, &ll
->ref_count_root
);
693 DMERR("ref count insert failed");
700 * inc within the overflow tree only.
702 r
= sm_ll_inc_overflow(ll
, b
, ic
);
713 * Finds a bitmap that contains entries in the block range, and increments
716 static int __sm_ll_inc(struct ll_disk
*ll
, dm_block_t b
, dm_block_t e
,
717 int32_t *nr_allocations
, dm_block_t
*new_b
)
720 struct inc_context ic
;
721 uint32_t bit
, bit_end
;
722 dm_block_t index
= b
;
724 init_inc_context(&ic
);
726 bit
= do_div(index
, ll
->entries_per_block
);
727 r
= ll
->load_ie(ll
, index
, &ic
.ie_disk
);
731 r
= shadow_bitmap(ll
, &ic
);
735 bit_end
= min(bit
+ (e
- b
), (dm_block_t
) ll
->entries_per_block
);
736 r
= sm_ll_inc_bitmap(ll
, b
, bit
, bit_end
, nr_allocations
, new_b
, &ic
);
738 exit_inc_context(ll
, &ic
);
743 return ll
->save_ie(ll
, index
, &ic
.ie_disk
);
746 int sm_ll_inc(struct ll_disk
*ll
, dm_block_t b
, dm_block_t e
,
747 int32_t *nr_allocations
)
751 int r
= __sm_ll_inc(ll
, b
, e
, nr_allocations
, &b
);
760 /*----------------------------------------------------------------*/
762 static int __sm_ll_del_overflow(struct ll_disk
*ll
, dm_block_t b
,
763 struct inc_context
*ic
)
765 reset_inc_context(ll
, ic
);
766 return dm_btree_remove(&ll
->ref_count_info
, ll
->ref_count_root
,
767 &b
, &ll
->ref_count_root
);
770 static int __sm_ll_dec_overflow(struct ll_disk
*ll
, dm_block_t b
,
771 struct inc_context
*ic
, uint32_t *old_rc
)
775 struct btree_node
*n
;
779 reset_inc_context(ll
, ic
);
780 r
= btree_get_overwrite_leaf(&ll
->ref_count_info
, ll
->ref_count_root
,
781 b
, &index
, &ll
->ref_count_root
, &ic
->overflow_leaf
);
785 n
= dm_block_data(ic
->overflow_leaf
);
787 if (!contains_key(n
, b
, index
)) {
788 DMERR("overflow btree is missing an entry");
792 v_ptr
= value_ptr(n
, index
);
793 rc
= le32_to_cpu(*v_ptr
);
797 return __sm_ll_del_overflow(ll
, b
, ic
);
800 *v_ptr
= cpu_to_le32(rc
);
804 static int sm_ll_dec_overflow(struct ll_disk
*ll
, dm_block_t b
,
805 struct inc_context
*ic
, uint32_t *old_rc
)
808 * Do we already have the correct overflow leaf?
810 if (ic
->overflow_leaf
) {
812 struct btree_node
*n
;
816 n
= dm_block_data(ic
->overflow_leaf
);
817 index
= lower_bound(n
, b
);
818 if (contains_key(n
, b
, index
)) {
819 v_ptr
= value_ptr(n
, index
);
820 rc
= le32_to_cpu(*v_ptr
);
825 *v_ptr
= cpu_to_le32(rc
);
828 return __sm_ll_del_overflow(ll
, b
, ic
);
834 return __sm_ll_dec_overflow(ll
, b
, ic
, old_rc
);
838 * Loops round incrementing entries in a single bitmap.
840 static inline int sm_ll_dec_bitmap(struct ll_disk
*ll
, dm_block_t b
,
841 uint32_t bit
, uint32_t bit_end
,
842 struct inc_context
*ic
,
843 int32_t *nr_allocations
, dm_block_t
*new_b
)
848 for (; bit
!= bit_end
; bit
++, b
++) {
850 * We only need to drop the bitmap if we need to find a new btree
851 * leaf for the overflow. So if it was dropped last iteration,
854 r
= ensure_bitmap(ll
, ic
);
858 old
= sm_lookup_bitmap(ic
->bitmap
, bit
);
861 DMERR("unable to decrement block");
866 sm_set_bitmap(ic
->bitmap
, bit
, 0);
869 le32_add_cpu(&ic
->ie_disk
.nr_free
, 1);
870 ic
->ie_disk
.none_free_before
=
871 cpu_to_le32(min(le32_to_cpu(ic
->ie_disk
.none_free_before
), bit
));
875 /* dec bitmap and insert into overflow */
876 sm_set_bitmap(ic
->bitmap
, bit
, 1);
880 r
= sm_ll_dec_overflow(ll
, b
, ic
, &old
);
885 r
= ensure_bitmap(ll
, ic
);
889 sm_set_bitmap(ic
->bitmap
, bit
, 2);
899 static int __sm_ll_dec(struct ll_disk
*ll
, dm_block_t b
, dm_block_t e
,
900 int32_t *nr_allocations
, dm_block_t
*new_b
)
903 uint32_t bit
, bit_end
;
904 struct inc_context ic
;
905 dm_block_t index
= b
;
907 init_inc_context(&ic
);
909 bit
= do_div(index
, ll
->entries_per_block
);
910 r
= ll
->load_ie(ll
, index
, &ic
.ie_disk
);
914 r
= shadow_bitmap(ll
, &ic
);
918 bit_end
= min(bit
+ (e
- b
), (dm_block_t
) ll
->entries_per_block
);
919 r
= sm_ll_dec_bitmap(ll
, b
, bit
, bit_end
, &ic
, nr_allocations
, new_b
);
920 exit_inc_context(ll
, &ic
);
925 return ll
->save_ie(ll
, index
, &ic
.ie_disk
);
928 int sm_ll_dec(struct ll_disk
*ll
, dm_block_t b
, dm_block_t e
,
929 int32_t *nr_allocations
)
933 int r
= __sm_ll_dec(ll
, b
, e
, nr_allocations
, &b
);
942 /*----------------------------------------------------------------*/
944 int sm_ll_commit(struct ll_disk
*ll
)
948 if (ll
->bitmap_index_changed
) {
951 ll
->bitmap_index_changed
= false;
957 /*----------------------------------------------------------------*/
959 static int metadata_ll_load_ie(struct ll_disk
*ll
, dm_block_t index
,
960 struct disk_index_entry
*ie
)
962 memcpy(ie
, ll
->mi_le
.index
+ index
, sizeof(*ie
));
966 static int metadata_ll_save_ie(struct ll_disk
*ll
, dm_block_t index
,
967 struct disk_index_entry
*ie
)
969 ll
->bitmap_index_changed
= true;
970 memcpy(ll
->mi_le
.index
+ index
, ie
, sizeof(*ie
));
974 static int metadata_ll_init_index(struct ll_disk
*ll
)
979 r
= dm_tm_new_block(ll
->tm
, &index_validator
, &b
);
983 ll
->bitmap_root
= dm_block_location(b
);
985 dm_tm_unlock(ll
->tm
, b
);
990 static int metadata_ll_open(struct ll_disk
*ll
)
993 struct dm_block
*block
;
995 r
= dm_tm_read_lock(ll
->tm
, ll
->bitmap_root
,
996 &index_validator
, &block
);
1000 memcpy(&ll
->mi_le
, dm_block_data(block
), sizeof(ll
->mi_le
));
1001 dm_tm_unlock(ll
->tm
, block
);
1006 static dm_block_t
metadata_ll_max_entries(struct ll_disk
*ll
)
1008 return MAX_METADATA_BITMAPS
;
1011 static int metadata_ll_commit(struct ll_disk
*ll
)
1016 r
= dm_tm_shadow_block(ll
->tm
, ll
->bitmap_root
, &index_validator
, &b
, &inc
);
1020 memcpy(dm_block_data(b
), &ll
->mi_le
, sizeof(ll
->mi_le
));
1021 ll
->bitmap_root
= dm_block_location(b
);
1023 dm_tm_unlock(ll
->tm
, b
);
1028 int sm_ll_new_metadata(struct ll_disk
*ll
, struct dm_transaction_manager
*tm
)
1032 r
= sm_ll_init(ll
, tm
);
1036 ll
->load_ie
= metadata_ll_load_ie
;
1037 ll
->save_ie
= metadata_ll_save_ie
;
1038 ll
->init_index
= metadata_ll_init_index
;
1039 ll
->open_index
= metadata_ll_open
;
1040 ll
->max_entries
= metadata_ll_max_entries
;
1041 ll
->commit
= metadata_ll_commit
;
1044 ll
->nr_allocated
= 0;
1046 r
= ll
->init_index(ll
);
1050 r
= dm_btree_empty(&ll
->ref_count_info
, &ll
->ref_count_root
);
1057 int sm_ll_open_metadata(struct ll_disk
*ll
, struct dm_transaction_manager
*tm
,
1058 void *root_le
, size_t len
)
1061 struct disk_sm_root smr
;
1063 if (len
< sizeof(struct disk_sm_root
)) {
1064 DMERR("sm_metadata root too small");
1069 * We don't know the alignment of the root_le buffer, so need to
1070 * copy into a new structure.
1072 memcpy(&smr
, root_le
, sizeof(smr
));
1074 r
= sm_ll_init(ll
, tm
);
1078 ll
->load_ie
= metadata_ll_load_ie
;
1079 ll
->save_ie
= metadata_ll_save_ie
;
1080 ll
->init_index
= metadata_ll_init_index
;
1081 ll
->open_index
= metadata_ll_open
;
1082 ll
->max_entries
= metadata_ll_max_entries
;
1083 ll
->commit
= metadata_ll_commit
;
1085 ll
->nr_blocks
= le64_to_cpu(smr
.nr_blocks
);
1086 ll
->nr_allocated
= le64_to_cpu(smr
.nr_allocated
);
1087 ll
->bitmap_root
= le64_to_cpu(smr
.bitmap_root
);
1088 ll
->ref_count_root
= le64_to_cpu(smr
.ref_count_root
);
1090 return ll
->open_index(ll
);
1093 /*----------------------------------------------------------------*/
1095 static inline int ie_cache_writeback(struct ll_disk
*ll
, struct ie_cache
*iec
)
1098 __dm_bless_for_disk(iec
->ie
);
1099 return dm_btree_insert(&ll
->bitmap_info
, ll
->bitmap_root
,
1100 &iec
->index
, &iec
->ie
, &ll
->bitmap_root
);
1103 static inline unsigned int hash_index(dm_block_t index
)
1105 return dm_hash_block(index
, IE_CACHE_MASK
);
1108 static int disk_ll_load_ie(struct ll_disk
*ll
, dm_block_t index
,
1109 struct disk_index_entry
*ie
)
1112 unsigned int h
= hash_index(index
);
1113 struct ie_cache
*iec
= ll
->ie_cache
+ h
;
1116 if (iec
->index
== index
) {
1117 memcpy(ie
, &iec
->ie
, sizeof(*ie
));
1122 r
= ie_cache_writeback(ll
, iec
);
1128 r
= dm_btree_lookup(&ll
->bitmap_info
, ll
->bitmap_root
, &index
, ie
);
1133 memcpy(&iec
->ie
, ie
, sizeof(*ie
));
1139 static int disk_ll_save_ie(struct ll_disk
*ll
, dm_block_t index
,
1140 struct disk_index_entry
*ie
)
1143 unsigned int h
= hash_index(index
);
1144 struct ie_cache
*iec
= ll
->ie_cache
+ h
;
1146 ll
->bitmap_index_changed
= true;
1148 if (iec
->index
== index
) {
1149 memcpy(&iec
->ie
, ie
, sizeof(*ie
));
1155 r
= ie_cache_writeback(ll
, iec
);
1164 memcpy(&iec
->ie
, ie
, sizeof(*ie
));
1168 static int disk_ll_init_index(struct ll_disk
*ll
)
1172 for (i
= 0; i
< IE_CACHE_SIZE
; i
++) {
1173 struct ie_cache
*iec
= ll
->ie_cache
+ i
;
1178 return dm_btree_empty(&ll
->bitmap_info
, &ll
->bitmap_root
);
1181 static int disk_ll_open(struct ll_disk
*ll
)
1186 static dm_block_t
disk_ll_max_entries(struct ll_disk
*ll
)
1191 static int disk_ll_commit(struct ll_disk
*ll
)
1196 for (i
= 0; i
< IE_CACHE_SIZE
; i
++) {
1197 struct ie_cache
*iec
= ll
->ie_cache
+ i
;
1199 if (iec
->valid
&& iec
->dirty
)
1200 r
= ie_cache_writeback(ll
, iec
);
1206 int sm_ll_new_disk(struct ll_disk
*ll
, struct dm_transaction_manager
*tm
)
1210 r
= sm_ll_init(ll
, tm
);
1214 ll
->load_ie
= disk_ll_load_ie
;
1215 ll
->save_ie
= disk_ll_save_ie
;
1216 ll
->init_index
= disk_ll_init_index
;
1217 ll
->open_index
= disk_ll_open
;
1218 ll
->max_entries
= disk_ll_max_entries
;
1219 ll
->commit
= disk_ll_commit
;
1222 ll
->nr_allocated
= 0;
1224 r
= ll
->init_index(ll
);
1228 r
= dm_btree_empty(&ll
->ref_count_info
, &ll
->ref_count_root
);
1235 int sm_ll_open_disk(struct ll_disk
*ll
, struct dm_transaction_manager
*tm
,
1236 void *root_le
, size_t len
)
1239 struct disk_sm_root
*smr
= root_le
;
1241 if (len
< sizeof(struct disk_sm_root
)) {
1242 DMERR("sm_metadata root too small");
1246 r
= sm_ll_init(ll
, tm
);
1250 ll
->load_ie
= disk_ll_load_ie
;
1251 ll
->save_ie
= disk_ll_save_ie
;
1252 ll
->init_index
= disk_ll_init_index
;
1253 ll
->open_index
= disk_ll_open
;
1254 ll
->max_entries
= disk_ll_max_entries
;
1255 ll
->commit
= disk_ll_commit
;
1257 ll
->nr_blocks
= le64_to_cpu(smr
->nr_blocks
);
1258 ll
->nr_allocated
= le64_to_cpu(smr
->nr_allocated
);
1259 ll
->bitmap_root
= le64_to_cpu(smr
->bitmap_root
);
1260 ll
->ref_count_root
= le64_to_cpu(smr
->ref_count_root
);
1262 return ll
->open_index(ll
);
1265 /*----------------------------------------------------------------*/