// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
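
/*
 * This file implements btrfs' original (v1) free space cache: an in-memory
 * rbtree of free extents and bitmaps per block group, plus the code that
 * persists that tree to a hidden inode so it can be reloaded quickly at mount.
 */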

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}
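
/*
 * Look up (and cache on the block group) the inode backing the free space
 * cache of @block_group, converting old-style space inodes that are missing
 * the NODATASUM/NODATACOW flags.
 */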

struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
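
/*
 * Create the backing inode item and the free space header item for a cache
 * file.  @offset is the block group start and doubles as the key offset of
 * the free space header.
 */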

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_fs_info *fs_info,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(fs_info->tree_root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
		btrfs_calc_trans_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * setup or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}
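
/*
 * The io_ctl serializes the cache through an array of pages: when crcs are
 * in use, the first page starts with one u32 checksum per page followed by
 * the generation, which is why all the checksums must fit in the first page.
 */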

static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
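
/*
 * Read one cache file into @ctl: validate the generation stored in the free
 * space header, then read the extent entries followed by the bitmap entries,
 * and finally merge adjacent extents.  Returns 1 when the cache was loaded,
 * 0 when it should be ignored, or a negative value on error.
 */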

static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}
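
/*
 * Second half of a cache write-out: wait for the ordered IO started by
 * __btrfs_write_out_cache, update the free space header item, and move the
 * block group to BTRFS_DC_WRITTEN (or BTRFS_DC_ERROR on failure).
 */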

static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;
	struct btrfs_fs_info *fs_info;

	if (!inode)
		return 0;

	fs_info = btrfs_sb(inode->i_sb);

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
			btrfs_err(fs_info,
		"failed to write free space cache for block group %llu",
				  block_group->key.objectid);
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.   Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(fs_info, block_group,
					  io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * at this point the pages are under IO and we're happy,
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				      block_group, &block_group->io_ctl, trans);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->key.objectid);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}
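
/*
 * Bitmap helpers: every bitmap entry covers BITS_PER_BITMAP * ctl->unit bytes,
 * one bit per ctl->unit of space, so offsets are converted to bit indexes
 * relative to the bitmap's start offset.
 */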

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
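
/*
 * Link/unlink keep the two counters in sync: free_extents counts tree
 * entries, free_space counts bytes.  The __ variant skips the free_space
 * adjustment for callers that are only repositioning bytes between entries.
 */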

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}
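
/*
 * Find the first entry (extent or bitmap) that can satisfy @bytes at or after
 * @offset while honouring @align; when nothing fits, the largest extent seen
 * is reported through @max_extent_size so callers can size their next attempt.
 */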

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info,
			u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much as it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still be allowed to have a bitmap
	 * entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
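
/*
 * Try to account @info's range in bitmap form: first in the bitmap owned by
 * the block group's cluster (if any), then in existing bitmaps, allocating a
 * new bitmap page when needed.  Returns 1 when the space was fully recorded
 * in bitmaps, 0 when it should stay an extent entry, or a negative errno.
 */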

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space :%d", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				bytes = 0;
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}
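
/*
 * Editorial note (added commentary, not part of the original source): a rough
 * worked example of the threshold above, assuming struct btrfs_free_space is
 * about 48 bytes on a 64-bit build:
 *
 *	extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space)
 *	               = 16384 / 48
 *	               ~= 341 extent entries
 *
 * so once a block group tracks a few hundred separate extents the cache
 * starts converting them to bitmaps.  The exact count depends on the real
 * structure size for the kernel being built.
 */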

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len);
	return ret;
}
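
/*
 * Editorial example (assumed numbers, not from the original source): if
 * find_free_space() lands in an extent entry that starts at 1MiB + 4KiB but
 * the caller needs 64KiB stripe alignment, the returned offset is rounded up
 * to 1MiB + 64KiB.  In that case align_gap = 1MiB + 4KiB and
 * align_gap_len = 60KiB, and that unaligned head is handed back to
 * __btrfs_add_free_space() above, after the tree lock has been dropped, so
 * the space is not lost.
 */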

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
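
/*
 * Editorial note (added commentary, not part of the original source):
 * btrfs_alloc_from_cluster() modifies the entry under cluster->lock only,
 * then reacquires ctl->tree_lock afterwards to update the space accounting
 * (free_space, free_extents, total_bitmaps) and to free an entry that was
 * fully consumed.  Entries owned by a cluster live in the cluster's own
 * rbtree rather than in ctl->free_space_offset, which is why the two locks
 * can be taken in sequence here rather than nested.
 */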

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
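
/*
 * Editorial example (assumed numbers, not from the original source): with a
 * 4KiB ctl->unit, a request for bytes = 1MiB gives want_bits = 256, and
 * min_bytes = 4KiB gives min_bits = 1.  The scan above therefore collects
 * runs of set bits of at least min_bits each, and keeps going until it has
 * accumulated want_bits in total with at least one run covering cont1_bytes.
 */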

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
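
/*
 * Editorial example (assumed numbers, not from the original source): for a
 * data block group on a 4KiB sectorsize filesystem, a request of bytes = 1MiB
 * with empty_size = 0 gives
 *
 *	cont1_bytes = max(1MiB, (1MiB + 0) >> 2) = 1MiB
 *	min_bytes   = 4KiB
 *
 * i.e. the cluster must contain one 1MiB contiguous run but may pad the rest
 * with 4KiB pieces.  For metadata, cont1_bytes is just 'bytes', and with
 * SSD_SPREAD both values are bytes + empty_size, so no fragmentation at all
 * is accepted.
 */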

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
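
/*
 * Editorial note (added commentary, not part of the original source): the
 * reserved range was removed from the free space tree by the caller before
 * the discard, so it is added back here once the discard has been issued.
 * Doing that, and deleting the btrfs_trim_range entry, under
 * cache_writeout_mutex appears intended to keep a concurrent free space
 * cache writeout consistent with ranges that are in the middle of being
 * trimmed.
 */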

static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}

static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
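
/*
 * Editorial example (assumed numbers, not part of the original source): one
 * bitmap entry covers BITS_PER_BITMAP * ctl->unit bytes.  With 4KiB pages and
 * a 4KiB sectorsize that is (4096 * 8) * 4096 = 128MiB, so the
 * "offset += BITS_PER_BITMAP * ctl->unit" steps above simply advance the scan
 * to the window covered by the next bitmap entry.
 */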

void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->trimming);
}

void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		/*
		 * remove_extent_mapping() will delete us from the pinned_chunks
		 * list, which is protected by the chunk mutex.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}

int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					inode->i_size, true);
		btrfs_err(fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kfree(map);
	return 0;
}

/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */