/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"

struct btrfs_free_space {
        struct rb_node bytes_index;
        struct rb_node offset_index;
        u64 offset;
        u64 bytes;
};

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with at
 * least bytes size.  We prefer to find as close to the given offset as we can.
 * Also if the offset is within a free space range, then we will return the free
 * space that contains the given offset, which means we can return a free space
 * chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset which is at least bytes size, and if it's not
 * there return NULL.
 */
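/*
 * Illustrative example (made-up numbers, not from the original source):
 * with free space entries [0, 8k) and [16k, 20k), a fuzzy search for
 * offset=4k, bytes=2k returns the [0, 8k) entry, because 4k falls inside
 * it even though the entry starts before the requested offset.  A
 * non-fuzzy search with the same arguments returns NULL, since no entry
 * starts exactly at 4k.
 */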
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int fuzzy)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (fuzzy &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if (fuzzy &&
                            (entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}

/*
 * return a chunk at least bytes in size, as close to offset as we can get.
 */
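/*
 * Illustrative example (made-up numbers): with entries of 4k at offset
 * 100k, 4k at offset 50k, and 64k at offset 0, a search for offset=40k,
 * bytes=4k picks the 4k entry at 50k: it is an exact size match, and of
 * the exact matches at or after the given offset it starts closest to
 * the offset, so the 64k hole is left intact.
 */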
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole size as close to the size we
                         * are asking for so we don't take small slivers out of
                         * huge holes, but we also want to get as close to the
                         * offset as possible so we don't have a whole lot of
                         * fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * Ok we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find, we
                         * want to take the one closest to our given offset, so
                         * keep searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}

static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        if (ret)
                return ret;

        return ret;
}

int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        int ret = 0;

        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&block_group->tree_lock);

        /*
         * first we want to see if there is free space adjacent to the range we
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
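        /*
         * Illustrative example (made-up numbers): if [0, 8k) and
         * [12k, 16k) are already in the tree and we add [8k, 12k), the
         * neighbor starting at 12k is found as right_info and the one
         * ending at 8k as left_info; both are unlinked and folded into
         * the new entry, which becomes a single [0, 16k) chunk.
         */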
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset + bytes, 0, 0);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset - 1, 0, 1);

        if (right_info) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }

        if (left_info && left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
        }

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);

        spin_unlock(&block_group->tree_lock);

        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&block_group->tree_lock);

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %llu, size %llu,"
                               " trying to use %llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                spin_unlock(&block_group->tree_lock);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
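                /*
                 * Illustrative example (made-up numbers): freeing
                 * [8k, 12k) out of a [0, 16k) entry relinks the entry
                 * as [12k, 16k) below, and step two re-adds [0, 8k) as
                 * a separate chunk.
                 */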
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;

                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
                BUG_ON(ret);
        } else {
                spin_unlock(&block_group->tree_lock);
                if (!info) {
                        printk(KERN_ERR "couldn't find space %llu to free\n",
                               (unsigned long long)offset);
                        printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
                               block_group->cached,
                               (unsigned long long)block_group->key.objectid,
                               (unsigned long long)block_group->key.offset);
                        btrfs_dump_free_space(block_group, bytes);
                } else {
                        printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
                               "but wanted offset=%llu bytes=%llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)offset,
                               (unsigned long long)bytes);
                }
                WARN_ON(1);
        }
out:
        return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                printk(KERN_ERR "entry offset %llu, bytes %llu\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than %llu "
               "bytes\n", count, (unsigned long long)bytes);
}

u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }

        return ret;
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->window_start = 0;
        node = rb_first(&cluster->root);
        while (node) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                link_free_space(block_group, entry);
        }
        list_del_init(&cluster->block_group_list);

        btrfs_put_block_group(cluster->block_group);
        cluster->block_group = NULL;
        cluster->root.rb_node = NULL;
out:
        spin_unlock(&cluster->lock);
        return 0;
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;
        struct btrfs_free_cluster *cluster;
        struct btrfs_free_cluster *safe;

        spin_lock(&block_group->tree_lock);

        list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
                                 block_group_list) {
                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
        }

        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
                        cond_resched();
                        spin_lock(&block_group->tree_lock);
                }
        }
        spin_unlock(&block_group->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        u64 ret = 0;

        spin_lock(&block_group->tree_lock);
        entry = tree_search_offset(&block_group->free_space_offset, offset,
                                   bytes + empty_size, 1);
        if (!entry)
                entry = tree_search_bytes(&block_group->free_space_bytes,
                                          offset, bytes + empty_size);
        if (entry) {
                unlink_free_space(block_group, entry);
                ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;

                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
        spin_unlock(&block_group->tree_lock);

        return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        /* now return any extents the cluster had on it */
        spin_lock(&block_group->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&block_group->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
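/*
 * Example of the return convention (made-up numbers): a successful 4k
 * allocation from a cluster entry at offset 1M returns 1M and shrinks
 * that entry to start at 1M + 4k; if no entry in the cluster is at
 * least 4k in size and at or above min_start, the function returns 0.
 */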
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while (1) {
                if (entry->bytes < bytes || entry->offset < min_start) {
                        struct rb_node *node;

                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }
                ret = entry->offset;

                entry->offset += bytes;
                entry->bytes -= bytes;

                if (entry->bytes == 0) {
                        rb_erase(&entry->offset_index, &cluster->root);
                        kfree(entry);
                }
                break;
        }
out:
        spin_unlock(&cluster->lock);
        return ret;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
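/*
 * Illustrative example (made-up numbers): asked for bytes=4k and
 * empty_size=12k, the search below starts a window at a free entry and
 * walks forward, accumulating neighboring entries of, say, 4k, 8k and
 * 4k until window_free reaches the 16k target; those entries then
 * become the cluster even though they are not one contiguous chunk.
 */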
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        struct btrfs_free_space *next;
        struct btrfs_free_space *last;
        u64 min_bytes;
        u64 window_start;
        u64 window_free;
        u64 max_extent = 0;
        int total_retries = 0;
        int ret;

        /* for metadata, allow allocates with more holes */
        if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
                /*
                 * we want to do larger allocations when we are
                 * flushing out the delayed refs, it helps prevent
                 * making more work as we go along.
                 */
                if (trans->transaction->delayed_refs.flushing)
                        min_bytes = max(bytes, (bytes + empty_size) >> 1);
                else
                        min_bytes = max(bytes, (bytes + empty_size) >> 4);
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);
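        /*
         * Worked example (made-up numbers): with bytes=4k and
         * empty_size=60k, (bytes + empty_size) is 64k, so min_bytes is
         * max(4k, 32k) = 32k while flushing delayed refs, max(4k, 4k) =
         * 4k for other metadata allocations, and max(4k, 16k) = 16k for
         * data, i.e. metadata is normally allowed to build the cluster
         * from much smaller fragments.
         */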

        spin_lock(&block_group->tree_lock);
        spin_lock(&cluster->lock);

        /* someone already found a cluster, hooray */
        if (cluster->block_group) {
                ret = 0;
                goto out;
        }
again:
        min_bytes = min(min_bytes, bytes + empty_size);
        entry = tree_search_bytes(&block_group->free_space_bytes,
                                  offset, min_bytes);
        if (!entry) {
                ret = -ENOSPC;
                goto out;
        }
        window_start = entry->offset;
        window_free = entry->bytes;
        last = entry;
        max_extent = entry->bytes;

        while (1) {
                /* our window is just right, let's fill it */
                if (window_free >= bytes + empty_size)
                        break;

                node = rb_next(&last->offset_index);
                if (!node) {
                        ret = -ENOSPC;
                        goto out;
                }
                next = rb_entry(node, struct btrfs_free_space, offset_index);

                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
                 */
                if (next->offset - window_start > (bytes + empty_size) * 2) {
                        entry = next;
                        window_start = entry->offset;
                        window_free = entry->bytes;
                        last = entry;
                        max_extent = 0;
                        total_retries++;
                        if (total_retries % 256 == 0) {
                                if (min_bytes >= (bytes + empty_size)) {
                                        ret = -ENOSPC;
                                        goto out;
                                }
                                /*
                                 * grow our allocation a bit, we're not having
                                 * much luck
                                 */
                                min_bytes *= 2;
                                goto again;
                        }
                } else {
                        last = next;
                        window_free += next->bytes;
                        if (entry->bytes > max_extent)
                                max_extent = entry->bytes;
                }
        }

        cluster->window_start = entry->offset;

        /*
         * now we've found our entries, pull them out of the free space
         * cache and put them into the cluster rbtree
         *
         * The cluster includes an rbtree, but only uses the offset index
         * of each free space cache entry.
         */
        while (1) {
                node = rb_next(&entry->offset_index);
                unlink_free_space(block_group, entry);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index);
                BUG_ON(ret);

                if (!node || entry == last)
                        break;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }
        ret = 0;
        cluster->max_size = max_extent;
        atomic_inc(&block_group->count);
        list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
        cluster->block_group = block_group;
out:
        spin_unlock(&cluster->lock);
        spin_unlock(&block_group->tree_lock);

        return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
        spin_lock_init(&cluster->lock);
        spin_lock_init(&cluster->refill_lock);
        cluster->root.rb_node = NULL;
        cluster->max_size = 0;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
}