/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdbool.h>
#include "kerncompat.h"
#include "extent_io.h"
#include "list.h"
#include "ctree.h"
#include "volumes.h"
#include "utils.h"
#include "internal.h"
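/*
 * Initialize an extent io tree; the buffer cache defaults to a quarter
 * of total system memory.
 */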
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	INIT_LIST_HEAD(&tree->lru);
	tree->cache_size = 0;
	tree->max_cache_size = (u64)total_memory() / 4;
}
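/*
 * Typical tree lifecycle, as an illustrative sketch only (the caller
 * and the range below are hypothetical, not taken from this file):
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(&tree);
 *	set_extent_dirty(&tree, 0, 16383);
 *	...
 *	clear_extent_dirty(&tree, 0, 16383);
 *	extent_io_tree_cleanup(&tree);
 */

/* Like extent_io_tree_init(), but with a caller-chosen cache limit. */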
void extent_io_tree_init_cache_max(struct extent_io_tree *tree,
				   u64 max_cache_size)
{
	extent_io_tree_init(tree);
	tree->max_cache_size = max_cache_size;
}
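/*
 * Allocate an extent_state holding one reference; its range is filled
 * in later by the insert/split helpers.
 */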
static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}
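/* Drop one reference; the state is freed when the last one goes away. */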
static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}
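/* Callback for cache_tree_free_extents(): drops the tree's reference. */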
static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}
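/*
 * Free everything tracked by the tree.  Extent buffers that still have
 * references are reported as leaks before being released.
 */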
static void free_extent_buffer_final(struct extent_buffer *eb);
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	struct extent_buffer *eb;

	while(!list_empty(&tree->lru)) {
		eb = list_entry(tree->lru.next, struct extent_buffer, lru);
		if (eb->refs) {
			fprintf(stderr,
				"extent buffer leak: start %llu len %u\n",
				(unsigned long long)eb->start, eb->len);
			free_extent_buffer_nocache(eb);
		} else {
			free_extent_buffer_final(eb);
		}
	}

	cache_tree_free_extents(&tree->state, free_extent_state_func);
}
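/* Keep the cache_node key in sync with the state's [start, end] range. */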
static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}
/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}
/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}
/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}
/*
 * Clear some bits in an extent state struct.  The state is freed if no
 * bits remain set, otherwise it is merged with its neighbours.  Returns
 * the subset of 'bits' that was actually set before the call.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}
/*
 * clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}
/*
 * set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	prealloc->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;
search_again:
	if (start > end)
		goto out;
	goto again;
}
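/* Convenience wrappers around set/clear_extent_bits() for EXTENT_DIRTY. */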
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}
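/*
 * Find the first extent at or after @start with any of @bits set.
 * Returns 0 and fills *start_ret/*end_ret on a hit, 1 otherwise.
 */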
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while(1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}
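/*
 * Test whether @bits are set on [start, end].  With @filled, the whole
 * range must be covered by states with the bits set; without it, any
 * overlapping state with the bits set is enough.
 */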
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}
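/*
 * Attach a caller-private u64 to the state starting exactly at @start;
 * returns -ENOENT if no such state exists.
 */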
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}
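/* Counterpart of set_state_private(): read back the stored value. */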
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}
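/*
 * Allocate an extent buffer with its data area allocated inline; the
 * caller is responsible for linking it into the extent cache.
 */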
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer) + blocksize);
	if (!eb)
		return NULL;

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->fd = -1;
	eb->dev_bytenr = (u64)-1;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	eb->tree = &info->extent_cache;
	INIT_LIST_HEAD(&eb->recow);
	INIT_LIST_HEAD(&eb->lru);

	return eb;
}
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;
	/* cloned eb is not linked into fs_info->extent_cache */
	new->tree = NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}
static void free_extent_buffer_final(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = eb->tree;

	BUG_ON(eb->refs);
	BUG_ON(tree && tree->cache_size < eb->len);
	list_del_init(&eb->lru);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		remove_cache_extent(&tree->cache, &eb->cache_node);
		tree->cache_size -= eb->len;
	}
	free(eb);
}
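/*
 * Drop one reference.  At zero references, dummy buffers and @free_now
 * callers free the buffer immediately; anything else stays cached on
 * the LRU until the cache is trimmed.
 */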
static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		BUG_ON(eb->flags & EXTENT_DIRTY);
		list_del_init(&eb->recow);
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}
void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, false);
}

void free_extent_buffer_nocache(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, true);
}
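/*
 * Look up a cached buffer that matches @bytenr/@blocksize exactly; a
 * hit takes an extra reference and refreshes its LRU position.
 */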
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		list_move_tail(&eb->lru, &tree->lru);
		eb->refs++;
	}
	return eb;
}
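/* Like find_extent_buffer(), but accept any cached buffer at or after @start. */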
struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		list_move_tail(&eb->lru, &tree->lru);
		eb->refs++;
	}
	return eb;
}
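/*
 * Walk the LRU and free unreferenced buffers until the cache drops
 * below 90% of its limit.
 */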
static void trim_extent_buffer_cache(struct extent_io_tree *tree)
{
	struct extent_buffer *eb, *tmp;

	list_for_each_entry_safe(eb, tmp, &tree->lru, lru) {
		if (eb->refs == 0)
			free_extent_buffer_final(eb);
		if (tree->cache_size <= ((tree->max_cache_size * 9) / 10))
			break;
	}
}
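/*
 * Return the cached buffer for @bytenr/@blocksize, or allocate, cache
 * and return a new one.  A stale entry that only partially overlaps
 * has its cache reference dropped first.  Allocation may trigger an
 * LRU trim once the cache limit is hit.
 */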
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		list_move_tail(&eb->lru, &tree->lru);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			free(eb);
			return NULL;
		}
		list_add_tail(&eb->lru, &tree->lru);
		tree->cache_size += blocksize;
		if (tree->cache_size >= tree->max_cache_size)
			trim_extent_buffer_cache(tree);
	}
	return eb;
}
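/*
 * Read @len bytes of the buffer's data at buffer offset @offset from
 * the device position recorded in eb->dev_bytenr.
 */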
int read_extent_from_disk(struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;
	ret = pread(eb->fd, eb->data + offset, len, eb->dev_bytenr + offset);
	if (ret < 0) {
		ret = -errno;
		goto out;
	}
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}
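/* Write the whole buffer to eb->dev_bytenr on eb->fd. */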
int write_extent_to_disk(struct extent_buffer *eb)
{
	int ret;
	ret = pwrite(eb->fd, eb->data, eb->len, eb->dev_bytenr);
	if (ret < 0)
		goto out;
	if (ret != eb->len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}
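/*
 * Read @bytes at logical @offset: each chunk is translated with
 * btrfs_map_block() and read from the first stripe of the requested
 * mirror.
 */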
int read_data_from_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
			u64 bytes, int mirror)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = bytes;
	u64 read_len;
	u64 total_read = 0;
	int ret;

	while (bytes_left) {
		read_len = bytes_left;
		ret = btrfs_map_block(info, READ, offset, &read_len, &multi,
				      mirror, NULL);
		if (ret) {
			fprintf(stderr, "Couldn't map the block %Lu\n",
				offset);
			return -EIO;
		}
		device = multi->stripes[0].dev;

		read_len = min(bytes_left, read_len);
		if (device->fd <= 0) {
			kfree(multi);
			return -EIO;
		}

		ret = pread(device->fd, buf + total_read, read_len,
			    multi->stripes[0].physical);
		kfree(multi);
		if (ret < 0) {
			fprintf(stderr, "Error reading %Lu, %d\n", offset,
				ret);
			return ret;
		}
		if (ret != read_len) {
			fprintf(stderr, "Short read for %Lu, read %d, "
				"read_len %Lu\n", offset, ret, read_len);
			return -EIO;
		}

		bytes_left -= read_len;
		offset += read_len;
		total_read += read_len;
	}

	return 0;
}
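/*
 * Write @bytes at logical @offset to all stripes of the mapping.
 * RAID5/6 chunks go through write_raid56_with_parity() so the parity
 * stripes stay consistent.
 */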
int write_data_to_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
		       u64 bytes, int mirror)
{
	struct btrfs_multi_bio *multi = NULL;
	struct btrfs_device *device;
	u64 bytes_left = bytes;
	u64 this_len;
	u64 total_write = 0;
	u64 *raid_map = NULL;
	u64 dev_bytenr;
	int dev_nr;
	int ret = 0;

	while (bytes_left > 0) {
		this_len = bytes_left;
		dev_nr = 0;

		ret = btrfs_map_block(info, WRITE, offset, &this_len, &multi,
				      mirror, &raid_map);
		if (ret) {
			fprintf(stderr, "Couldn't map the block %Lu\n",
				offset);
			return -EIO;
		}

		if (raid_map) {
			struct extent_buffer *eb;
			u64 stripe_len = this_len;

			this_len = min(this_len, bytes_left);
			this_len = min(this_len, (u64)info->nodesize);

			eb = malloc(sizeof(struct extent_buffer) + this_len);
			if (!eb) {
				fprintf(stderr, "cannot allocate memory for eb\n");
				ret = -ENOMEM;
				goto out;
			}

			memset(eb, 0, sizeof(struct extent_buffer) + this_len);
			eb->start = offset;
			eb->len = this_len;

			memcpy(eb->data, buf + total_write, this_len);
			ret = write_raid56_with_parity(info, eb, multi,
						       stripe_len, raid_map);
			BUG_ON(ret);

			free(eb);
			kfree(raid_map);
			raid_map = NULL;
		} else while (dev_nr < multi->num_stripes) {
			device = multi->stripes[dev_nr].dev;
			if (device->fd <= 0) {
				kfree(multi);
				return -EIO;
			}

			dev_bytenr = multi->stripes[dev_nr].physical;
			this_len = min(this_len, bytes_left);
			dev_nr++;

			ret = pwrite(device->fd, buf + total_write, this_len,
				     dev_bytenr);
			if (ret != this_len) {
				if (ret < 0) {
					ret = -errno;
					fprintf(stderr, "Error writing to "
						"device: %d\n", -ret);
					kfree(multi);
					return ret;
				} else {
					fprintf(stderr, "Short write\n");
					kfree(multi);
					return -EIO;
				}
			}
		}

		BUG_ON(bytes_left < this_len);

		bytes_left -= this_len;
		offset += this_len;
		total_write += this_len;

		kfree(multi);
		multi = NULL;
	}
	return 0;

out:
	kfree(raid_map);
	return ret;
}
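/*
 * Dirtying a buffer takes an extra reference so it cannot be evicted
 * while writeback is pending; clear_extent_buffer_dirty() drops it.
 */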
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = eb->tree;
	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}
int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = eb->tree;
	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}
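/*
 * The helpers below operate purely on the in-memory copy of the
 * buffer; none of them touch the disk.
 */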
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}