/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
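
/*
 * Sizing note (a sketch, assuming 4KiB pages): BITS_PER_BITMAP is then
 * 4096 * 8 = 32768 bits, so with a 4KiB ctl->unit a single bitmap entry
 * tracks 32768 * 4096 = 128MiB of space, while MAX_CACHE_BYTES_PER_GIG
 * caps the tracking memory at 32KiB per 1GiB of block group.
 */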
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	btrfs_release_path(path);
	return ERR_PTR(-ENOENT);

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);

	return ERR_PTR(-ENOENT);

	if (is_bad_inode(inode)) {
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			   "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));

	btrfs_release_path(path);

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	ret = btrfs_find_free_objectid(root, &ino);

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)

	spin_unlock(&rsv->lock);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
}
	struct btrfs_root *root;
	unsigned check_crcs:1;

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	kunmap(io_ctl->page);
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);

		io_ctl_drop_pages(io_ctl);

		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);

			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
	}

	io_ctl->cur += sizeof(u64);
}
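
/*
 * On-disk layout implied by the two helpers above (a sketch, for the
 * check_crcs case): the first page begins with an array of one u32 crc per
 * cached page, followed by the u64 generation, and only then the free space
 * entries; without crcs only the u64 generation precedes the entries.
 */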
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);

	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);

	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);

	printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
			   "space cache\n");
	io_ctl_unmap_page(io_ctl);
}
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);

	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);

	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	ret = io_ctl_check_crc(io_ctl, io_ctl->index);

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;

	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);

		if (e->bitmap || prev->bitmap)

		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);

			spin_unlock(&ctl->tree_lock);
		}
	}
	spin_unlock(&ctl->tree_lock);
}
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	btrfs_release_path(path);

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			  "free space inode generation (%llu) "
			  "did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
	}

	ret = io_ctl_init(&io_ctl, inode, root);

	ret = readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);

	ret = io_ctl_check_crc(&io_ctl, 0);

	ret = io_ctl_check_generation(&io_ctl, generation);

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, e);

		if (!e->bytes)
			kmem_cache_free(btrfs_free_space_cachep, e);

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
			}
		} else {
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap)
				kmem_cache_free(
						btrfs_free_space_cachep, e);
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
			}
			list_add_tail(&e->list, &bitmaps);
		}
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);

	io_ctl_free(&io_ctl);

	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_path *path;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();

	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_err(fs_info, "block group %llu has wrong amount of free space",
			  block_group->key.objectid);
	}

	/* This cache is bogus, make sure it gets cleared */
	spin_lock(&block_group->lock);
	block_group->disk_cache_state = BTRFS_DC_CLEAR;
	spin_unlock(&block_group->lock);

	btrfs_err(fs_info, "failed to load free space cache for block group %llu",
		  block_group->key.objectid);
/*
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, extent_start, extent_end, len;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);

		if (e->bitmap)
			list_add_tail(&e->list, &bitmap_list);

		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space.
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);

		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
			 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
			 GFP_NOFS);

	leaf = path->nodes[0];

	struct btrfs_key found_key;
	ASSERT(path->slots[0]);

	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
	    found_key.offset != offset) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
				 inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
				 NULL, GFP_NOFS);
		btrfs_release_path(path);
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	io_ctl_free(&io_ctl);

	invalidate_inode_pages2(inode->i_mapping);
	BTRFS_I(inode)->generation = 0;

	btrfs_update_inode(trans, root, inode);

	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
	}
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
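
/*
 * Worked example (a sketch, assuming 4KiB pages and a 4KiB ctl->unit, so one
 * bitmap covers BITS_PER_BITMAP * unit = 32768 * 4096 = 128MiB): for a block
 * group starting at ctl->start = 1GiB, an offset of 1GiB + 200MiB is rounded
 * down by offset_to_bitmap() to 1GiB + 128MiB, the start of the second
 * bitmap's range.
 */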
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {

				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */

	entry = rb_entry(n, struct btrfs_free_space, offset_index);

	if (offset < entry->offset)
		n = n->rb_left;
	else if (offset > entry->offset)
		n = n->rb_right;

	/*
	 * bitmap entry and extent entry may share same offset,
	 * in that case, bitmap entry comes after extent entry.
	 */

	entry = rb_entry(n, struct btrfs_free_space, offset_index);
	if (entry->offset != offset)
		return NULL;

	WARN_ON(!entry->bitmap);

	if (entry->bitmap) {
		/*
		 * if previous extent entry covers the offset,
		 * we should return it instead of the bitmap entry
		 */
		n = rb_prev(&entry->offset_index);
		prev = rb_entry(n, struct btrfs_free_space,
				offset_index);
		if (!prev->bitmap &&
		    prev->offset + prev->bytes > offset)
			return prev;
	}

	/* find last entry before the 'offset' */

	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		entry = rb_entry(n, struct btrfs_free_space,
				 offset_index);
		ASSERT(entry->offset <= offset);
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		prev = rb_entry(n, struct btrfs_free_space,
				offset_index);
		if (!prev->bitmap &&
		    prev->offset + prev->bytes > offset)
			return prev;

		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (entry->bitmap) {
		if (entry->offset + BITS_PER_BITMAP *
		    ctl->unit > offset)
			return entry;
	} else {
		if (entry->offset + entry->bytes > offset)
			return entry;
	}

	n = rb_next(&entry->offset_index);

	entry = rb_entry(n, struct btrfs_free_space, offset_index);
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max(max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
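
/*
 * Worked example (a sketch, assuming 4KiB pages): for a 1GiB block group,
 * max_bytes is MAX_CACHE_BYTES_PER_GIG = 32KiB.  With one bitmap already in
 * use, bitmap_bytes = (1 + 1) * 4096 = 8KiB, so extent entries may use
 * min(32KiB - 8KiB, 32KiB / 2) = 16KiB, giving an extents_thresh of
 * 16384 / sizeof(struct btrfs_free_space) entries.
 */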
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
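
/*
 * Worked example (a sketch, assuming ctl->unit == 4096): a request for
 * *bytes = 16KiB converts to bits = 4, and a run of 6 set bits starting at
 * bit 10 of the bitmap satisfies it, so the search returns
 * *offset = bitmap_info->offset + 10 * 4096 and *bytes = 6 * 4096 = 24KiB.
 */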
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);

	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info,
			u64 *offset, u64 *bytes)
{
	u64 search_start, search_bytes;

	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so still allow those block groups to have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}
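
/*
 * Worked example (a sketch, assuming 4KiB pages and a 4KiB ctl->unit): one
 * bitmap spans 32768 * 4096 = 128MiB, so the final check in use_bitmap()
 * skips bitmaps for block groups smaller than 64MiB, where plain extent
 * entries are cheaper than a full bitmap page.
 */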
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 bytes, offset, bytes_added;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;

	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;

	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
	}

	spin_unlock(&ctl->tree_lock);

	/* no pre-allocated info, allocate a new one */
	info = kmem_cache_zalloc(btrfs_free_space_cachep,
				 GFP_NOFS);

	spin_lock(&ctl->tree_lock);

	/* allocate the bitmap */
	info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
	spin_lock(&ctl->tree_lock);
	if (!info->bitmap) {

	}

	kfree(info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, info);
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);

	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
		}
	}

	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes)
				ret = link_free_space(ctl, info);
			else
				kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
			} else if (old_end == offset + bytes) {

			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
	}

	spin_unlock(&ctl->tree_lock);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       info->offset, info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
			      sizeof(struct btrfs_free_space);
}
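
/*
 * Worked example (a sketch): with the 32k-per-block-group budget above, the
 * initial extents_thresh corresponds to 16384 bytes worth of
 * struct btrfs_free_space entries before recalculate_thresholds() starts
 * pushing new free space into bitmaps.
 */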
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);

		try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);

	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);

	if (entry->bytes < bytes && entry->bytes > *max_extent_size)
		*max_extent_size = entry->bytes;

	if (entry->bytes < bytes ||
	    (!entry->bitmap && entry->offset < min_start)) {
		node = rb_next(&entry->offset_index);

		entry = rb_entry(node, struct btrfs_free_space,
				 offset_index);
	}

	if (entry->bitmap) {
		ret = btrfs_alloc_from_bitmap(block_group,
					      cluster, entry, bytes,
					      cluster->window_start,
					      max_extent_size);
		if (!ret) {
			node = rb_next(&entry->offset_index);

			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}
		cluster->window_start += bytes;
	} else {
		ret = entry->offset;

		entry->offset += bytes;
		entry->bytes -= bytes;
	}

	if (entry->bytes == 0)
		rb_erase(&entry->offset_index, &cluster->root);

out:
	spin_unlock(&cluster->lock);

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			break;
		}
	}

	cluster->max_size = 0;

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {

	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret);	/* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;

	entry = tree_search_offset(ctl, offset, 0, 1);

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret);	/* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
	if (entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	struct list_head bitmaps;
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
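/*
 * A rough sketch of how a caller is expected to drive the cluster API (the
 * extent allocator does roughly this while holding cluster->refill_lock; the
 * exact upstream logic differs in its retries and error handling):
 *
 *	spin_lock(&cluster->refill_lock);
 *	ret = btrfs_find_space_cluster(root, block_group, cluster,
 *				       search_start, num_bytes, empty_size);
 *	if (!ret)
 *		offset = btrfs_alloc_from_cluster(block_group, cluster,
 *						  num_bytes, search_start);
 *	spin_unlock(&cluster->refill_lock);
 */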
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
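/*
 * Discard the range [start, start + bytes) and then hand the originally
 * reserved range back to the free space cache.  The space is accounted as
 * reserved for the duration of the discard so the allocator leaves it alone,
 * but only while the block group is still writable.
 */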
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_error_discard_extent(fs_info->extent_root,
					 start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
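/*
 * Trim the plain extent entries in [start, end): unlink each entry that
 * yields at least minlen bytes in the range and pass it to do_trimming().
 * Bitmap entries are skipped here and left for trim_bitmaps().
 */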
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
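/*
 * Trim the bitmap entries covering [start, end), one BITS_PER_BITMAP window
 * at a time: clear each free run of at least minlen bytes and pass it to
 * do_trimming().
 */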
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;

		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
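/*
 * Trim all free space in [start, end) of the block group: first the plain
 * extent entries, then whatever is recorded in bitmaps.  The number of bytes
 * actually discarded is returned in *trimmed.
 */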
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		return ret;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
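/*
 * Return the cached free-ino inode for this root if we already hold one,
 * otherwise look it up on disk and, unless the filesystem is shutting down,
 * stash a reference in root->cache_inode for later callers.
 */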
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
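/*
 * Load the free-ino cache for @root from its cache inode.  This is skipped
 * when INODE_MAP_CACHE is not enabled, when the filesystem is unmounting, or
 * when the on-disk generation no longer matches the root's generation.
 */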
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
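/*
 * Write the in-memory free-ino cache for @root back to its cache inode.  On
 * failure the delalloc metadata reservation taken for the cache is released
 * before the error is returned.
 */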
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
	}

	iput(inode);
	return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	return 0;
}
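/*
 * A minimal sketch of how these helpers are meant to be combined in the
 * free-space sanity tests (illustrative only; the offsets and sizes below are
 * made up and the real tests live in a separate test file):
 *
 *	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 0);
 *	...
 *	if (test_check_exists(cache, 0, 4 * 1024 * 1024))
 *		the helper found free space somewhere in that range
 */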
/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&info->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */