// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-array.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>
15 #define DM_MSG_PREFIX "array"
17 /*----------------------------------------------------------------*/
20 * The array is implemented as a fully populated btree, which points to
21 * blocks that contain the packed values. This is more space efficient
22 * than just using a btree since we don't store 1 key per value.
29 __le64 blocknr
; /* Block this node is supposed to live in. */
32 /*----------------------------------------------------------------*/
/*
 * Validator methods.  As usual we calculate a checksum, and also write the
 * block location into the header (paranoia about ssds remapping areas by
 * mistake).
 */
39 #define CSUM_XOR 595846735
41 static void array_block_prepare_for_write(const struct dm_block_validator
*v
,
45 struct array_block
*bh_le
= dm_block_data(b
);
47 bh_le
->blocknr
= cpu_to_le64(dm_block_location(b
));
48 bh_le
->csum
= cpu_to_le32(dm_bm_checksum(&bh_le
->max_entries
,
49 size_of_block
- sizeof(__le32
),
53 static int array_block_check(const struct dm_block_validator
*v
,
57 struct array_block
*bh_le
= dm_block_data(b
);
60 if (dm_block_location(b
) != le64_to_cpu(bh_le
->blocknr
)) {
61 DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__
,
62 (unsigned long long) le64_to_cpu(bh_le
->blocknr
),
63 (unsigned long long) dm_block_location(b
));
67 csum_disk
= cpu_to_le32(dm_bm_checksum(&bh_le
->max_entries
,
68 size_of_block
- sizeof(__le32
),
70 if (csum_disk
!= bh_le
->csum
) {
71 DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__
,
72 (unsigned int) le32_to_cpu(csum_disk
),
73 (unsigned int) le32_to_cpu(bh_le
->csum
));
80 static const struct dm_block_validator array_validator
= {
82 .prepare_for_write
= array_block_prepare_for_write
,
83 .check
= array_block_check
86 /*----------------------------------------------------------------*/
/*
 * Functions for manipulating the array blocks.
 */
93 * Returns a pointer to a value within an array block.
95 * index - The index into _this_ specific block.
97 static void *element_at(struct dm_array_info
*info
, struct array_block
*ab
,
100 unsigned char *entry
= (unsigned char *) (ab
+ 1);
102 entry
+= index
* info
->value_type
.size
;
108 * Utility function that calls one of the value_type methods on every value
111 static void on_entries(struct dm_array_info
*info
, struct array_block
*ab
,
112 void (*fn
)(void *, const void *, unsigned int))
114 unsigned int nr_entries
= le32_to_cpu(ab
->nr_entries
);
116 fn(info
->value_type
.context
, element_at(info
, ab
, 0), nr_entries
);
120 * Increment every value in an array block.
122 static void inc_ablock_entries(struct dm_array_info
*info
, struct array_block
*ab
)
124 struct dm_btree_value_type
*vt
= &info
->value_type
;
127 on_entries(info
, ab
, vt
->inc
);
131 * Decrement every value in an array block.
133 static void dec_ablock_entries(struct dm_array_info
*info
, struct array_block
*ab
)
135 struct dm_btree_value_type
*vt
= &info
->value_type
;
138 on_entries(info
, ab
, vt
->dec
);
142 * Each array block can hold this many values.
144 static uint32_t calc_max_entries(size_t value_size
, size_t size_of_block
)
146 return (size_of_block
- sizeof(struct array_block
)) / value_size
;
150 * Allocate a new array block. The caller will need to unlock block.
152 static int alloc_ablock(struct dm_array_info
*info
, size_t size_of_block
,
153 uint32_t max_entries
,
154 struct dm_block
**block
, struct array_block
**ab
)
158 r
= dm_tm_new_block(info
->btree_info
.tm
, &array_validator
, block
);
162 (*ab
) = dm_block_data(*block
);
163 (*ab
)->max_entries
= cpu_to_le32(max_entries
);
164 (*ab
)->nr_entries
= cpu_to_le32(0);
165 (*ab
)->value_size
= cpu_to_le32(info
->value_type
.size
);
171 * Pad an array block out with a particular value. Every instance will
172 * cause an increment of the value_type. new_nr must always be more than
173 * the current number of entries.
175 static void fill_ablock(struct dm_array_info
*info
, struct array_block
*ab
,
176 const void *value
, unsigned int new_nr
)
178 uint32_t nr_entries
, delta
, i
;
179 struct dm_btree_value_type
*vt
= &info
->value_type
;
181 BUG_ON(new_nr
> le32_to_cpu(ab
->max_entries
));
182 BUG_ON(new_nr
< le32_to_cpu(ab
->nr_entries
));
184 nr_entries
= le32_to_cpu(ab
->nr_entries
);
185 delta
= new_nr
- nr_entries
;
187 vt
->inc(vt
->context
, value
, delta
);
188 for (i
= nr_entries
; i
< new_nr
; i
++)
189 memcpy(element_at(info
, ab
, i
), value
, vt
->size
);
190 ab
->nr_entries
= cpu_to_le32(new_nr
);
194 * Remove some entries from the back of an array block. Every value
195 * removed will be decremented. new_nr must be <= the current number of
198 static void trim_ablock(struct dm_array_info
*info
, struct array_block
*ab
,
201 uint32_t nr_entries
, delta
;
202 struct dm_btree_value_type
*vt
= &info
->value_type
;
204 BUG_ON(new_nr
> le32_to_cpu(ab
->max_entries
));
205 BUG_ON(new_nr
> le32_to_cpu(ab
->nr_entries
));
207 nr_entries
= le32_to_cpu(ab
->nr_entries
);
208 delta
= nr_entries
- new_nr
;
210 vt
->dec(vt
->context
, element_at(info
, ab
, new_nr
- 1), delta
);
211 ab
->nr_entries
= cpu_to_le32(new_nr
);
215 * Read locks a block, and coerces it to an array block. The caller must
216 * unlock 'block' when finished.
218 static int get_ablock(struct dm_array_info
*info
, dm_block_t b
,
219 struct dm_block
**block
, struct array_block
**ab
)
223 r
= dm_tm_read_lock(info
->btree_info
.tm
, b
, &array_validator
, block
);
227 *ab
= dm_block_data(*block
);
232 * Unlocks an array block.
234 static void unlock_ablock(struct dm_array_info
*info
, struct dm_block
*block
)
236 dm_tm_unlock(info
->btree_info
.tm
, block
);
239 /*----------------------------------------------------------------*/
/*
 * Btree manipulation.
 */
246 * Looks up an array block in the btree, and then read locks it.
248 * index is the index of the index of the array_block, (ie. the array index
251 static int lookup_ablock(struct dm_array_info
*info
, dm_block_t root
,
252 unsigned int index
, struct dm_block
**block
,
253 struct array_block
**ab
)
256 uint64_t key
= index
;
259 r
= dm_btree_lookup(&info
->btree_info
, root
, &key
, &block_le
);
263 return get_ablock(info
, le64_to_cpu(block_le
), block
, ab
);
267 * Insert an array block into the btree. The block is _not_ unlocked.
269 static int insert_ablock(struct dm_array_info
*info
, uint64_t index
,
270 struct dm_block
*block
, dm_block_t
*root
)
272 __le64 block_le
= cpu_to_le64(dm_block_location(block
));
274 __dm_bless_for_disk(block_le
);
275 return dm_btree_insert(&info
->btree_info
, *root
, &index
, &block_le
, root
);
278 /*----------------------------------------------------------------*/
280 static int __shadow_ablock(struct dm_array_info
*info
, dm_block_t b
,
281 struct dm_block
**block
, struct array_block
**ab
)
284 int r
= dm_tm_shadow_block(info
->btree_info
.tm
, b
,
285 &array_validator
, block
, &inc
);
289 *ab
= dm_block_data(*block
);
291 inc_ablock_entries(info
, *ab
);
297 * The shadow op will often be a noop. Only insert if it really
300 static int __reinsert_ablock(struct dm_array_info
*info
, unsigned int index
,
301 struct dm_block
*block
, dm_block_t b
,
306 if (dm_block_location(block
) != b
) {
308 * dm_tm_shadow_block will have already decremented the old
309 * block, but it is still referenced by the btree. We
310 * increment to stop the insert decrementing it below zero
311 * when overwriting the old value.
313 dm_tm_inc(info
->btree_info
.tm
, b
);
314 r
= insert_ablock(info
, index
, block
, root
);
321 * Looks up an array block in the btree. Then shadows it, and updates the
322 * btree to point to this new shadow. 'root' is an input/output parameter
323 * for both the current root block, and the new one.
325 static int shadow_ablock(struct dm_array_info
*info
, dm_block_t
*root
,
326 unsigned int index
, struct dm_block
**block
,
327 struct array_block
**ab
)
330 uint64_t key
= index
;
334 r
= dm_btree_lookup(&info
->btree_info
, *root
, &key
, &block_le
);
337 b
= le64_to_cpu(block_le
);
339 r
= __shadow_ablock(info
, b
, block
, ab
);
343 return __reinsert_ablock(info
, index
, *block
, b
, root
);
347 * Allocate an new array block, and fill it with some values.
349 static int insert_new_ablock(struct dm_array_info
*info
, size_t size_of_block
,
350 uint32_t max_entries
,
351 unsigned int block_index
, uint32_t nr
,
352 const void *value
, dm_block_t
*root
)
355 struct dm_block
*block
;
356 struct array_block
*ab
;
358 r
= alloc_ablock(info
, size_of_block
, max_entries
, &block
, &ab
);
362 fill_ablock(info
, ab
, value
, nr
);
363 r
= insert_ablock(info
, block_index
, block
, root
);
364 unlock_ablock(info
, block
);
369 static int insert_full_ablocks(struct dm_array_info
*info
, size_t size_of_block
,
370 unsigned int begin_block
, unsigned int end_block
,
371 unsigned int max_entries
, const void *value
,
376 for (; !r
&& begin_block
!= end_block
; begin_block
++)
377 r
= insert_new_ablock(info
, size_of_block
, max_entries
, begin_block
, max_entries
, value
, root
);
383 * There are a bunch of functions involved with resizing an array. This
384 * structure holds information that commonly needed by them. Purely here
385 * to reduce parameter count.
389 * Describes the array.
391 struct dm_array_info
*info
;
394 * The current root of the array. This gets updated.
399 * Metadata block size. Used to calculate the nr entries in an
402 size_t size_of_block
;
405 * Maximum nr entries in an array block.
407 unsigned int max_entries
;
410 * nr of completely full blocks in the array.
412 * 'old' refers to before the resize, 'new' after.
414 unsigned int old_nr_full_blocks
, new_nr_full_blocks
;
417 * Number of entries in the final block. 0 iff only full blocks in
420 unsigned int old_nr_entries_in_last_block
, new_nr_entries_in_last_block
;
423 * The default value used when growing the array.
429 * Removes a consecutive set of array blocks from the btree. The values
430 * in block are decremented as a side effect of the btree remove.
432 * begin_index - the index of the first array block to remove.
433 * end_index - the one-past-the-end value. ie. this block is not removed.
435 static int drop_blocks(struct resize
*resize
, unsigned int begin_index
,
436 unsigned int end_index
)
440 while (begin_index
!= end_index
) {
441 uint64_t key
= begin_index
++;
443 r
= dm_btree_remove(&resize
->info
->btree_info
, resize
->root
,
444 &key
, &resize
->root
);
/*
 * Calculates how many blocks are needed for the array: all the full blocks
 * plus one partial tail block when there are leftover entries.
 */
static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
                                           unsigned int nr_entries_in_last_block)
{
        return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}
464 static int shrink(struct resize
*resize
)
467 unsigned int begin
, end
;
468 struct dm_block
*block
;
469 struct array_block
*ab
;
472 * Lose some blocks from the back?
474 if (resize
->new_nr_full_blocks
< resize
->old_nr_full_blocks
) {
475 begin
= total_nr_blocks_needed(resize
->new_nr_full_blocks
,
476 resize
->new_nr_entries_in_last_block
);
477 end
= total_nr_blocks_needed(resize
->old_nr_full_blocks
,
478 resize
->old_nr_entries_in_last_block
);
480 r
= drop_blocks(resize
, begin
, end
);
486 * Trim the new tail block
488 if (resize
->new_nr_entries_in_last_block
) {
489 r
= shadow_ablock(resize
->info
, &resize
->root
,
490 resize
->new_nr_full_blocks
, &block
, &ab
);
494 trim_ablock(resize
->info
, ab
, resize
->new_nr_entries_in_last_block
);
495 unlock_ablock(resize
->info
, block
);
504 static int grow_extend_tail_block(struct resize
*resize
, uint32_t new_nr_entries
)
507 struct dm_block
*block
;
508 struct array_block
*ab
;
510 r
= shadow_ablock(resize
->info
, &resize
->root
,
511 resize
->old_nr_full_blocks
, &block
, &ab
);
515 fill_ablock(resize
->info
, ab
, resize
->value
, new_nr_entries
);
516 unlock_ablock(resize
->info
, block
);
521 static int grow_add_tail_block(struct resize
*resize
)
523 return insert_new_ablock(resize
->info
, resize
->size_of_block
,
525 resize
->new_nr_full_blocks
,
526 resize
->new_nr_entries_in_last_block
,
527 resize
->value
, &resize
->root
);
530 static int grow_needs_more_blocks(struct resize
*resize
)
533 unsigned int old_nr_blocks
= resize
->old_nr_full_blocks
;
535 if (resize
->old_nr_entries_in_last_block
> 0) {
538 r
= grow_extend_tail_block(resize
, resize
->max_entries
);
543 r
= insert_full_ablocks(resize
->info
, resize
->size_of_block
,
545 resize
->new_nr_full_blocks
,
546 resize
->max_entries
, resize
->value
,
551 if (resize
->new_nr_entries_in_last_block
)
552 r
= grow_add_tail_block(resize
);
557 static int grow(struct resize
*resize
)
559 if (resize
->new_nr_full_blocks
> resize
->old_nr_full_blocks
)
560 return grow_needs_more_blocks(resize
);
562 else if (resize
->old_nr_entries_in_last_block
)
563 return grow_extend_tail_block(resize
, resize
->new_nr_entries_in_last_block
);
566 return grow_add_tail_block(resize
);
569 /*----------------------------------------------------------------*/
/*
 * These are the value_type functions for the btree elements, which point
 * to array blocks.
 */
575 static void block_inc(void *context
, const void *value
, unsigned int count
)
577 const __le64
*block_le
= value
;
578 struct dm_array_info
*info
= context
;
581 for (i
= 0; i
< count
; i
++, block_le
++)
582 dm_tm_inc(info
->btree_info
.tm
, le64_to_cpu(*block_le
));
585 static void __block_dec(void *context
, const void *value
)
591 struct dm_block
*block
;
592 struct array_block
*ab
;
593 struct dm_array_info
*info
= context
;
595 memcpy(&block_le
, value
, sizeof(block_le
));
596 b
= le64_to_cpu(block_le
);
598 r
= dm_tm_ref(info
->btree_info
.tm
, b
, &ref_count
);
600 DMERR_LIMIT("couldn't get reference count for block %llu",
601 (unsigned long long) b
);
605 if (ref_count
== 1) {
607 * We're about to drop the last reference to this ablock.
608 * So we need to decrement the ref count of the contents.
610 r
= get_ablock(info
, b
, &block
, &ab
);
612 DMERR_LIMIT("couldn't get array block %llu",
613 (unsigned long long) b
);
617 dec_ablock_entries(info
, ab
);
618 unlock_ablock(info
, block
);
621 dm_tm_dec(info
->btree_info
.tm
, b
);
624 static void block_dec(void *context
, const void *value
, unsigned int count
)
628 for (i
= 0; i
< count
; i
++, value
+= sizeof(__le64
))
629 __block_dec(context
, value
);
632 static int block_equal(void *context
, const void *value1
, const void *value2
)
634 return !memcmp(value1
, value2
, sizeof(__le64
));
637 /*----------------------------------------------------------------*/
639 void dm_array_info_init(struct dm_array_info
*info
,
640 struct dm_transaction_manager
*tm
,
641 struct dm_btree_value_type
*vt
)
643 struct dm_btree_value_type
*bvt
= &info
->btree_info
.value_type
;
645 memcpy(&info
->value_type
, vt
, sizeof(info
->value_type
));
646 info
->btree_info
.tm
= tm
;
647 info
->btree_info
.levels
= 1;
650 bvt
->size
= sizeof(__le64
);
651 bvt
->inc
= block_inc
;
652 bvt
->dec
= block_dec
;
653 bvt
->equal
= block_equal
;
655 EXPORT_SYMBOL_GPL(dm_array_info_init
);
657 int dm_array_empty(struct dm_array_info
*info
, dm_block_t
*root
)
659 return dm_btree_empty(&info
->btree_info
, root
);
661 EXPORT_SYMBOL_GPL(dm_array_empty
);
663 static int array_resize(struct dm_array_info
*info
, dm_block_t root
,
664 uint32_t old_size
, uint32_t new_size
,
665 const void *value
, dm_block_t
*new_root
)
668 struct resize resize
;
670 if (old_size
== new_size
) {
677 resize
.size_of_block
= dm_bm_block_size(dm_tm_get_bm(info
->btree_info
.tm
));
678 resize
.max_entries
= calc_max_entries(info
->value_type
.size
,
679 resize
.size_of_block
);
681 resize
.old_nr_full_blocks
= old_size
/ resize
.max_entries
;
682 resize
.old_nr_entries_in_last_block
= old_size
% resize
.max_entries
;
683 resize
.new_nr_full_blocks
= new_size
/ resize
.max_entries
;
684 resize
.new_nr_entries_in_last_block
= new_size
% resize
.max_entries
;
685 resize
.value
= value
;
687 r
= ((new_size
> old_size
) ? grow
: shrink
)(&resize
);
691 *new_root
= resize
.root
;
695 int dm_array_resize(struct dm_array_info
*info
, dm_block_t root
,
696 uint32_t old_size
, uint32_t new_size
,
697 const void *value
, dm_block_t
*new_root
)
698 __dm_written_to_disk(value
)
700 int r
= array_resize(info
, root
, old_size
, new_size
, value
, new_root
);
702 __dm_unbless_for_disk(value
);
705 EXPORT_SYMBOL_GPL(dm_array_resize
);
707 static int populate_ablock_with_values(struct dm_array_info
*info
, struct array_block
*ab
,
708 value_fn fn
, void *context
,
709 unsigned int base
, unsigned int new_nr
)
713 struct dm_btree_value_type
*vt
= &info
->value_type
;
715 BUG_ON(le32_to_cpu(ab
->nr_entries
));
716 BUG_ON(new_nr
> le32_to_cpu(ab
->max_entries
));
718 for (i
= 0; i
< new_nr
; i
++) {
719 r
= fn(base
+ i
, element_at(info
, ab
, i
), context
);
724 vt
->inc(vt
->context
, element_at(info
, ab
, i
), 1);
727 ab
->nr_entries
= cpu_to_le32(new_nr
);
731 int dm_array_new(struct dm_array_info
*info
, dm_block_t
*root
,
732 uint32_t size
, value_fn fn
, void *context
)
735 struct dm_block
*block
;
736 struct array_block
*ab
;
737 unsigned int block_index
, end_block
, size_of_block
, max_entries
;
739 r
= dm_array_empty(info
, root
);
743 size_of_block
= dm_bm_block_size(dm_tm_get_bm(info
->btree_info
.tm
));
744 max_entries
= calc_max_entries(info
->value_type
.size
, size_of_block
);
745 end_block
= dm_div_up(size
, max_entries
);
747 for (block_index
= 0; block_index
!= end_block
; block_index
++) {
748 r
= alloc_ablock(info
, size_of_block
, max_entries
, &block
, &ab
);
752 r
= populate_ablock_with_values(info
, ab
, fn
, context
,
753 block_index
* max_entries
,
754 min(max_entries
, size
));
756 unlock_ablock(info
, block
);
760 r
= insert_ablock(info
, block_index
, block
, root
);
761 unlock_ablock(info
, block
);
770 EXPORT_SYMBOL_GPL(dm_array_new
);
772 int dm_array_del(struct dm_array_info
*info
, dm_block_t root
)
774 return dm_btree_del(&info
->btree_info
, root
);
776 EXPORT_SYMBOL_GPL(dm_array_del
);
778 int dm_array_get_value(struct dm_array_info
*info
, dm_block_t root
,
779 uint32_t index
, void *value_le
)
782 struct dm_block
*block
;
783 struct array_block
*ab
;
784 size_t size_of_block
;
785 unsigned int entry
, max_entries
;
787 size_of_block
= dm_bm_block_size(dm_tm_get_bm(info
->btree_info
.tm
));
788 max_entries
= calc_max_entries(info
->value_type
.size
, size_of_block
);
790 r
= lookup_ablock(info
, root
, index
/ max_entries
, &block
, &ab
);
794 entry
= index
% max_entries
;
795 if (entry
>= le32_to_cpu(ab
->nr_entries
))
798 memcpy(value_le
, element_at(info
, ab
, entry
),
799 info
->value_type
.size
);
801 unlock_ablock(info
, block
);
804 EXPORT_SYMBOL_GPL(dm_array_get_value
);
806 static int array_set_value(struct dm_array_info
*info
, dm_block_t root
,
807 uint32_t index
, const void *value
, dm_block_t
*new_root
)
810 struct dm_block
*block
;
811 struct array_block
*ab
;
812 size_t size_of_block
;
813 unsigned int max_entries
;
816 struct dm_btree_value_type
*vt
= &info
->value_type
;
818 size_of_block
= dm_bm_block_size(dm_tm_get_bm(info
->btree_info
.tm
));
819 max_entries
= calc_max_entries(info
->value_type
.size
, size_of_block
);
821 r
= shadow_ablock(info
, &root
, index
/ max_entries
, &block
, &ab
);
826 entry
= index
% max_entries
;
827 if (entry
>= le32_to_cpu(ab
->nr_entries
)) {
832 old_value
= element_at(info
, ab
, entry
);
834 (!vt
->equal
|| !vt
->equal(vt
->context
, old_value
, value
))) {
835 vt
->dec(vt
->context
, old_value
, 1);
837 vt
->inc(vt
->context
, value
, 1);
840 memcpy(old_value
, value
, info
->value_type
.size
);
843 unlock_ablock(info
, block
);
847 int dm_array_set_value(struct dm_array_info
*info
, dm_block_t root
,
848 uint32_t index
, const void *value
, dm_block_t
*new_root
)
849 __dm_written_to_disk(value
)
853 r
= array_set_value(info
, root
, index
, value
, new_root
);
854 __dm_unbless_for_disk(value
);
857 EXPORT_SYMBOL_GPL(dm_array_set_value
);
860 struct dm_array_info
*info
;
861 int (*fn
)(void *context
, uint64_t key
, void *leaf
);
865 static int walk_ablock(void *context
, uint64_t *keys
, void *leaf
)
867 struct walk_info
*wi
= context
;
872 unsigned int nr_entries
, max_entries
;
873 struct dm_block
*block
;
874 struct array_block
*ab
;
876 memcpy(&block_le
, leaf
, sizeof(block_le
));
877 r
= get_ablock(wi
->info
, le64_to_cpu(block_le
), &block
, &ab
);
881 max_entries
= le32_to_cpu(ab
->max_entries
);
882 nr_entries
= le32_to_cpu(ab
->nr_entries
);
883 for (i
= 0; i
< nr_entries
; i
++) {
884 r
= wi
->fn(wi
->context
, keys
[0] * max_entries
+ i
,
885 element_at(wi
->info
, ab
, i
));
891 unlock_ablock(wi
->info
, block
);
895 int dm_array_walk(struct dm_array_info
*info
, dm_block_t root
,
896 int (*fn
)(void *, uint64_t key
, void *leaf
),
903 wi
.context
= context
;
905 return dm_btree_walk(&info
->btree_info
, root
, walk_ablock
, &wi
);
907 EXPORT_SYMBOL_GPL(dm_array_walk
);
909 /*----------------------------------------------------------------*/
911 static int load_ablock(struct dm_array_cursor
*c
)
918 unlock_ablock(c
->info
, c
->block
);
924 r
= dm_btree_cursor_get_value(&c
->cursor
, &key
, &value_le
);
926 DMERR("dm_btree_cursor_get_value failed");
927 dm_btree_cursor_end(&c
->cursor
);
930 r
= get_ablock(c
->info
, le64_to_cpu(value_le
), &c
->block
, &c
->ab
);
932 DMERR("get_ablock failed");
933 dm_btree_cursor_end(&c
->cursor
);
940 int dm_array_cursor_begin(struct dm_array_info
*info
, dm_block_t root
,
941 struct dm_array_cursor
*c
)
945 memset(c
, 0, sizeof(*c
));
947 r
= dm_btree_cursor_begin(&info
->btree_info
, root
, true, &c
->cursor
);
949 DMERR("couldn't create btree cursor");
953 return load_ablock(c
);
955 EXPORT_SYMBOL_GPL(dm_array_cursor_begin
);
957 void dm_array_cursor_end(struct dm_array_cursor
*c
)
960 unlock_ablock(c
->info
, c
->block
);
961 dm_btree_cursor_end(&c
->cursor
);
964 EXPORT_SYMBOL_GPL(dm_array_cursor_end
);
966 int dm_array_cursor_next(struct dm_array_cursor
*c
)
975 if (c
->index
>= le32_to_cpu(c
->ab
->nr_entries
)) {
976 r
= dm_btree_cursor_next(&c
->cursor
);
987 EXPORT_SYMBOL_GPL(dm_array_cursor_next
);
989 int dm_array_cursor_skip(struct dm_array_cursor
*c
, uint32_t count
)
994 uint32_t remaining
= le32_to_cpu(c
->ab
->nr_entries
) - c
->index
;
996 if (count
< remaining
) {
1002 r
= dm_array_cursor_next(c
);
1008 EXPORT_SYMBOL_GPL(dm_array_cursor_skip
);
1010 void dm_array_cursor_get_value(struct dm_array_cursor
*c
, void **value_le
)
1012 *value_le
= element_at(c
->info
, c
->ab
, c
->index
);
1014 EXPORT_SYMBOL_GPL(dm_array_cursor_get_value
);
1016 /*----------------------------------------------------------------*/