/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "kerncompat.h"
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "crc32c.h"
#include "bitops.h"

/*
 * Kernel always uses PAGE_CACHE_SIZE for sectorsize, but we don't have
 * anything like that in userspace and have to get the value from the
 * filesystem
 */
#define BITS_PER_BITMAP(sectorsize)	((sectorsize) * 8)
#define MAX_CACHE_BYTES_PER_GIG		(32 * 1024)

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
static void merge_space_tree(struct btrfs_free_space_ctl *ctl);

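/*
 * The io_ctl below is a cursor over the contents of a free space cache
 * inode: the whole cache file is read into ->buffer and then walked one
 * sectorsize "page" at a time, with ->cur/->orig pointing into the
 * current page and ->index/->num_pages tracking the position.
 */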
struct io_ctl {
        void *cur, *orig;
        void *buffer;
        struct btrfs_root *root;
        unsigned long size;
        u64 total_size;
        int index;
        int num_pages;
        unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, u64 size, u64 ino,
                       struct btrfs_root *root)
{
        memset(io_ctl, 0, sizeof(struct io_ctl));
        io_ctl->num_pages = (size + root->sectorsize - 1) / root->sectorsize;
        io_ctl->buffer = kzalloc(size, GFP_NOFS);
        if (!io_ctl->buffer)
                return -ENOMEM;
        io_ctl->total_size = size;
        io_ctl->root = root;
        if (ino != BTRFS_FREE_INO_OBJECTID)
                io_ctl->check_crcs = 1;
        return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
        kfree(io_ctl->buffer);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
        if (io_ctl->cur) {
                io_ctl->cur = NULL;
                io_ctl->orig = NULL;
        }
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
        BUG_ON(io_ctl->index >= io_ctl->num_pages);
        io_ctl->cur = io_ctl->buffer + (io_ctl->index++ * io_ctl->root->sectorsize);
        io_ctl->orig = io_ctl->cur;
        io_ctl->size = io_ctl->root->sectorsize;
        if (clear)
                memset(io_ctl->cur, 0, io_ctl->root->sectorsize);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
        io_ctl_unmap_page(io_ctl);
}

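/*
 * Read the data extents of the free space inode 'ino' from disk into
 * io_ctl->buffer, keyed by file offset.  Only BTRFS_FILE_EXTENT_REG
 * extents are accepted; anything else fails with -EINVAL.
 */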
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct btrfs_root *root,
                                struct btrfs_path *path, u64 ino)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 bytenr, len;
        u64 total_read = 0;
        int ret = 0;

        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret) {
                fprintf(stderr,
                        "Couldn't find file extent item for free space inode"
                        " %Lu\n", ino);
                btrfs_release_path(path);
                return -EINVAL;
        }

        while (total_read < io_ctl->total_size) {
                if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret) {
                                ret = -EINVAL;
                                break;
                        }
                }
                leaf = path->nodes[0];

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != ino) {
                        ret = -EINVAL;
                        break;
                }

                if (key.type != BTRFS_EXTENT_DATA_KEY) {
                        ret = -EINVAL;
                        break;
                }

                fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_file_extent_item);
                if (btrfs_file_extent_type(path->nodes[0], fi) !=
                    BTRFS_FILE_EXTENT_REG) {
                        fprintf(stderr, "Not the file extent type we wanted\n");
                        ret = -EINVAL;
                        break;
                }

                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi) +
                        btrfs_file_extent_offset(leaf, fi);
                len = btrfs_file_extent_num_bytes(leaf, fi);
                ret = read_data_from_disk(root->fs_info,
                                          io_ctl->buffer + key.offset, bytenr,
                                          len, 0);
                if (ret)
                        break;
                total_read += len;
                path->slots[0]++;
        }

        btrfs_release_path(path);
        return ret;
}

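/*
 * Layout of the first cache page as consumed below: when crcs are
 * stored (any inode other than the free ino cache), the page starts
 * with an array of u32 crc32c values, one per page, followed by a u64
 * generation; otherwise only the u64 generation is present.  The
 * generation must match the one recorded in the free space header.
 */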
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *gen;

        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
         * chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
                io_ctl->size -= sizeof(u64) +
                        (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        gen = io_ctl->cur;
        if (le64_to_cpu(*gen) != generation) {
                printk("btrfs: space cache generation "
                       "(%Lu) does not match inode (%Lu)\n", *gen,
                       generation);
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }
        io_ctl->cur += sizeof(u64);
        return 0;
}

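/*
 * Map page 'index' and verify its crc32c against the value stored in
 * the checksum array at the front of the cache.  Page 0 excludes the
 * checksum array itself from the checksummed range.
 */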
static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp, val;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_map_page(io_ctl, 0);
                return 0;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        tmp = io_ctl->buffer;
        tmp += index;
        val = *tmp;

        io_ctl_map_page(io_ctl, 0);
        crc = crc32c(crc, io_ctl->orig + offset, io_ctl->root->sectorsize - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                printk("btrfs: csum mismatch on free space cache\n");
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }

        return 0;
}

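/*
 * Pull the next btrfs_free_space_entry (offset, bytes, type) off the
 * current page, mapping and checksumming a new page first if the
 * previous one was exhausted.
 */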
static int io_ctl_read_entry(struct io_ctl *io_ctl,
                             struct btrfs_free_space *entry, u8 *type)
{
        struct btrfs_free_space_entry *e;
        int ret;

        if (!io_ctl->cur) {
                ret = io_ctl_check_crc(io_ctl, io_ctl->index);
                if (ret)
                        return ret;
        }

        e = io_ctl->cur;
        entry->offset = le64_to_cpu(e->offset);
        entry->bytes = le64_to_cpu(e->bytes);
        *type = e->type;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_unmap_page(io_ctl);

        return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
                              struct btrfs_free_space *entry)
{
        int ret;

        ret = io_ctl_check_crc(io_ctl, io_ctl->index);
        if (ret)
                return ret;

        memcpy(entry->bitmap, io_ctl->cur, io_ctl->root->sectorsize);
        io_ctl_unmap_page(io_ctl);

        return 0;
}

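/*
 * Core of the cache load: find the FREE_SPACE header item for this
 * block group, validate the cached generation against the cache inode,
 * read all extent and bitmap entries, then read the bitmap pages that
 * were queued up, and finally merge everything into plain extent
 * entries.  Returns 1 on success, 0 if there is no usable cache, and a
 * negative value on error.
 */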
static int __load_free_space_cache(struct btrfs_root *root,
                                   struct btrfs_free_space_ctl *ctl,
                                   struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_key inode_location;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space *e, *n;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u64 inode_size;
        u8 type;
        int ret = 0;

        INIT_LIST_HEAD(&bitmaps);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
                return 0;
        } else if (ret > 0) {
                btrfs_release_path(path);
                return 0;
        }

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&inode_location, &disk_key);
        btrfs_release_path(path);

        ret = btrfs_search_slot(NULL, root, &inode_location, path, 0, 0);
        if (ret) {
                fprintf(stderr, "Couldn't find free space inode %d\n", ret);
                return 0;
        }

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);

        inode_size = btrfs_inode_size(leaf, inode_item);
        if (!inode_size || !btrfs_inode_generation(leaf, inode_item)) {
                btrfs_release_path(path);
                return 0;
        }

        if (btrfs_inode_generation(leaf, inode_item) != generation) {
                fprintf(stderr,
                        "free space inode generation (%llu) did not match "
                        "free space cache generation (%llu)\n",
                        (unsigned long long)btrfs_inode_generation(leaf,
                                                                   inode_item),
                        (unsigned long long)generation);
                btrfs_release_path(path);
                return 0;
        }

        btrfs_release_path(path);

        if (!num_entries)
                return 0;

        ret = io_ctl_init(&io_ctl, inode_size, inode_location.objectid, root);
        if (ret)
                return ret;

        ret = io_ctl_prepare_pages(&io_ctl, root, path,
                                   inode_location.objectid);
        if (ret)
                goto out;

        ret = io_ctl_check_crc(&io_ctl, 0);
        if (ret)
                goto free_cache;

        ret = io_ctl_check_generation(&io_ctl, generation);
        if (ret)
                goto free_cache;

        while (num_entries) {
                e = calloc(1, sizeof(*e));
                if (!e)
                        goto free_cache;

                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
                        free(e);
                        goto free_cache;
                }

                if (!e->bytes) {
                        free(e);
                        goto free_cache;
                }

                if (type == BTRFS_FREE_SPACE_EXTENT) {
                        ret = link_free_space(ctl, e);
                        if (ret) {
                                fprintf(stderr,
                                        "Duplicate entries in free space cache\n");
                                free(e);
                                goto free_cache;
                        }
                } else {
                        BUG_ON(!num_bitmaps);
                        num_bitmaps--;
                        e->bitmap = kzalloc(ctl->sectorsize, GFP_NOFS);
                        if (!e->bitmap) {
                                free(e);
                                goto free_cache;
                        }
                        ret = link_free_space(ctl, e);
                        ctl->total_bitmaps++;
                        if (ret) {
                                fprintf(stderr,
                                        "Duplicate entries in free space cache\n");
                                free(e->bitmap);
                                free(e);
                                goto free_cache;
                        }
                        list_add_tail(&e->list, &bitmaps);
                }

                num_entries--;
        }

        io_ctl_unmap_page(&io_ctl);

        /*
         * We add the bitmaps at the end of the entries in order that
         * the bitmap entries are added to the cache.
         */
        list_for_each_entry_safe(e, n, &bitmaps, list) {
                list_del_init(&e->list);
                ret = io_ctl_read_bitmap(&io_ctl, e);
                if (ret)
                        goto free_cache;
        }

        io_ctl_drop_pages(&io_ctl);
        merge_space_tree(ctl);
        ret = 1;
out:
        io_ctl_free(&io_ctl);
        return ret;
free_cache:
        io_ctl_drop_pages(&io_ctl);
        __btrfs_remove_free_space_cache(ctl);
        goto out;
}

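/*
 * Load the on-disk free space cache for a block group and sanity check
 * the result: the cached free space must equal the block group size
 * minus the used bytes and the super block reservation (bytes_super),
 * otherwise the cache is discarded.
 */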
int load_free_space_cache(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_path *path;
        u64 used = btrfs_block_group_used(&block_group->item);
        int ret = 0;
        int matched;

        path = btrfs_alloc_path();
        if (!path)
                return 0;

        ret = __load_free_space_cache(fs_info->tree_root, ctl, path,
                                      block_group->key.objectid);
        btrfs_free_path(path);

        matched = (ctl->free_space == (block_group->key.offset - used -
                                       block_group->bytes_super));
        if (ret == 1 && !matched) {
                __btrfs_remove_free_space_cache(ctl);
                fprintf(stderr,
                        "block group %llu has wrong amount of free space\n",
                        block_group->key.objectid);
                ret = -1;
        }

        if (ret < 0) {
                ret = 0;

                fprintf(stderr,
                        "failed to load free space cache for block group %llu\n",
                        block_group->key.objectid);
        }

        return ret;
}

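/*
 * Bitmap helpers: one bitmap entry covers BITS_PER_BITMAP(sectorsize)
 * bits, each bit representing ctl->unit bytes starting at the entry's
 * offset.
 */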
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
{
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
        return (unsigned long)(offset / unit);
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
        return (unsigned long)(bytes / unit);
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node, int bitmap)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset) {
                        p = &(*p)->rb_left;
                } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
                } else {
                        /*
                         * we could have a bitmap entry and an extent entry
                         * share the same offset.  If this is the case, we want
                         * the extent entry to always be found first if we do a
                         * linear search through the tree, since we want to have
                         * the quickest allocation time, and allocating from an
                         * extent is faster than allocating from a bitmap.  So
                         * if we're inserting a bitmap and we find an entry at
                         * this offset, we want to go right, or after this entry
                         * logically.  If we are inserting an extent and we've
                         * found a bitmap, we want to go left, or before
                         * logically.
                         */
                        if (bitmap) {
                                if (info->bitmap)
                                        return -EEXIST;
                                p = &(*p)->rb_right;
                        } else {
                                if (!info->bitmap)
                                        return -EEXIST;
                                p = &(*p)->rb_left;
                        }
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
{
        struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;
        u32 sectorsize = ctl->sectorsize;

        /* find entry that is closest to the 'offset' */
        while (1) {
                if (!n) {
                        entry = NULL;
                        break;
                }

                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                prev = entry;

                if (offset < entry->offset)
                        n = n->rb_left;
                else if (offset > entry->offset)
                        n = n->rb_right;
                else
                        break;
        }

        if (bitmap_only) {
                if (!entry)
                        return NULL;
                if (entry->bitmap)
                        return entry;

                /*
                 * bitmap entry and extent entry may share same offset,
                 * in that case, bitmap entry comes after extent entry.
                 */
                n = rb_next(n);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                if (entry->offset != offset)
                        return NULL;

                WARN_ON(!entry->bitmap);
                return entry;
        } else if (entry) {
                if (entry->bitmap) {
                        /*
                         * if previous extent entry covers the offset,
                         * we should return it instead of the bitmap entry
                         */
                        n = rb_prev(&entry->offset_index);
                        if (n) {
                                prev = rb_entry(n, struct btrfs_free_space,
                                                offset_index);
                                if (!prev->bitmap &&
                                    prev->offset + prev->bytes > offset)
                                        entry = prev;
                        }
                }
                return entry;
        }

        if (!prev)
                return NULL;

        /* find last entry before the 'offset' */
        entry = prev;
        if (entry->offset > offset) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        entry = rb_entry(n, struct btrfs_free_space,
                                         offset_index);
                        BUG_ON(entry->offset > offset);
                } else {
                        if (fuzzy)
                                return entry;
                        else
                                return NULL;
                }
        }

        if (entry->bitmap) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        prev = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        if (!prev->bitmap &&
                            prev->offset + prev->bytes > offset)
                                return prev;
                }
                if (entry->offset + BITS_PER_BITMAP(sectorsize) * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;

        if (!fuzzy)
                return NULL;

        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP(sectorsize) *
                            ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
                                break;
                }

                n = rb_next(&entry->offset_index);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
        }
        return entry;
}

void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                       struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &ctl->free_space_offset);
        ctl->free_extents--;
        ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;

        ctl->free_space += info->bytes;
        ctl->free_extents++;
        return ret;
}

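/*
 * Scan bitmap_info for a run of set bits covering at least *bytes
 * bytes, starting at or after *offset.  On success, *offset and *bytes
 * are updated to describe the run found and 0 is returned; -1 means no
 * such run exists in this bitmap.
 */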
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
{
        unsigned long found_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;
        u32 sectorsize = ctl->sectorsize;

        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
        bits = bytes_to_bits(*bytes, ctl->unit);

        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP(sectorsize)) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP(sectorsize), i);
                if ((next_zero - i) >= bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (found_bits) {
                *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
                *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }

        return -1;
}

struct btrfs_free_space *
btrfs_find_free_space(struct btrfs_free_space_ctl *ctl, u64 offset, u64 bytes)
{
        return tree_search_offset(ctl, offset, 0, 0);
}

static void try_merge_free_space(struct btrfs_free_space_ctl *ctl,
                                 struct btrfs_free_space *info)
{
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *right_info;
        u64 offset = info->offset;
        u64 bytes = info->bytes;

        /*
         * first we want to see if there is free space adjacent to the range we
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
        right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
        if (right_info && rb_prev(&right_info->offset_index))
                left_info = rb_entry(rb_prev(&right_info->offset_index),
                                     struct btrfs_free_space, offset_index);
        else
                left_info = tree_search_offset(ctl, offset - 1, 0, 0);

        if (right_info && !right_info->bitmap) {
                unlink_free_space(ctl, right_info);
                info->bytes += right_info->bytes;
                free(right_info);
        }

        if (left_info && !left_info->bitmap &&
            left_info->offset + left_info->bytes == offset) {
                unlink_free_space(ctl, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                free(left_info);
        }
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes && !block_group->ro)
                        count++;
                printk("entry offset %llu, bytes %llu, bitmap %s\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes,
                       (info->bitmap) ? "yes" : "no");
        }
        printk("%d blocks of free space at or bigger than bytes is \n", count);
}

int btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group,
                              int sectorsize)
{
        struct btrfs_free_space_ctl *ctl;

        ctl = calloc(1, sizeof(*ctl));
        if (!ctl)
                return -ENOMEM;

        ctl->sectorsize = sectorsize;
        ctl->unit = sectorsize;
        ctl->start = block_group->key.objectid;
        ctl->private = block_group;
        block_group->free_space_ctl = ctl;

        return 0;
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, offset_index);
                unlink_free_space(ctl, info);
                free(info->bitmap);
                free(info);
        }
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        __btrfs_remove_free_space_cache(block_group->free_space_ctl);
}

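/*
 * Record a free extent [offset, offset + bytes): merge it with any
 * directly adjacent extent entries first, then link the result into
 * the offset-sorted tree.
 */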
static int btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, u64 offset,
                                u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        info = calloc(1, sizeof(*info));
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        try_merge_free_space(ctl, info);

        ret = link_free_space(ctl, info);
        if (ret) {
                printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}

/*
 * Merges all the free space cache and kills the bitmap entries since we just
 * want to use the free space cache to verify it's correct, no reason to keep
 * the bitmaps around to confuse things.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *e, *prev = NULL;
        struct rb_node *n;
        int ret;
        u32 sectorsize = ctl->sectorsize;

again:
        prev = NULL;
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                e = rb_entry(n, struct btrfs_free_space, offset_index);
                if (e->bitmap) {
                        u64 offset = e->offset, bytes = ctl->unit;
                        u64 end;

                        end = e->offset + (u64)(BITS_PER_BITMAP(sectorsize) * ctl->unit);

                        unlink_free_space(ctl, e);
                        while (!(search_bitmap(ctl, e, &offset, &bytes))) {
                                ret = btrfs_add_free_space(ctl, offset,
                                                           bytes);
                                BUG_ON(ret);
                                offset += bytes;
                                if (offset >= end)
                                        break;
                                bytes = ctl->unit;
                        }
                        free(e->bitmap);
                        free(e);
                        goto again;
                }
                if (!prev)
                        goto next;
                if (prev->offset + prev->bytes == e->offset) {
                        unlink_free_space(ctl, prev);
                        unlink_free_space(ctl, e);
                        prev->bytes += e->bytes;
                        free(e);
                        link_free_space(ctl, prev);
                        goto again;
                }
next:
                prev = e;
        }
}