// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
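/*
 * Worked example (assuming 4KiB pages; not a comment from the original
 * file): BITS_PER_BITMAP is then 32768, so a single bitmap entry tracks
 * 32768 * ctl->unit bytes of space -- 128MiB per bitmap when the unit
 * (the filesystem sectorsize) is also 4KiB.
 */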
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_fs_info *fs_info,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(fs_info->tree_root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
		btrfs_calc_trans_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * setup or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}
static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);
static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}
static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}
static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}
static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}
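/*
 * Note on the on-disk layout assumed by the io_ctl helpers below
 * (derived from the code itself, not a comment in the original file):
 * with crcs enabled, the first page starts with one u32 checksum slot
 * per cache page, followed by the u64 generation; entries begin right
 * after that. Without crcs only a single u64 generation sits at the
 * front of the first page.
 */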
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas. If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}
static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area. If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}
static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
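/*
 * Note (derived from the code below): entries are packed into each
 * mapped page until less than one struct btrfs_free_space_entry of room
 * remains; the page is then checksummed via io_ctl_set_crc() and the
 * next page is mapped, so an entry never straddles a page boundary.
 */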
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}
static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}
static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
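/*
 * Note (return convention inferred from the caller): the loader below
 * returns 1 when the cache was read successfully, 0 when the on-disk
 * cache should simply be ignored, and a negative errno on hard failure;
 * load_free_space_cache() treats anything <= 0 as "rebuild the cache".
 */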
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}
static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}
static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}
static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}
static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group)
			btrfs_err(root->fs_info,
				  "failed to write free space cache for block group %llu",
				  block_group->key.objectid);
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(fs_info, block_group,
					  io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * at this point the pages are under IO and we're happy,
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}
int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				      block_group, &block_group->io_ctl, trans);
	if (ret) {
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->key.objectid);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}
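/*
 * Worked example for offset_to_bitmap() below (illustrative, assuming
 * ctl->start = 0, a 4KiB unit and BITS_PER_BITMAP = 32768): each bitmap
 * then covers 128MiB, so an offset of 200MiB rounds down to the 128MiB
 * boundary and the function returns 128MiB -- the start offset of the
 * bitmap entry covering that byte.
 */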
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
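/*
 * Worked example for the threshold calculation below (illustrative,
 * assuming a 4KiB unit): for a 1GiB block group max_bytes is
 * MAX_CACHE_BYTES_PER_GIG (32KiB). With 3 bitmaps in use, bitmap_bytes
 * is (3 + 1) * 4KiB = 16KiB, so extent_bytes becomes
 * min(32KiB - 16KiB, 32KiB / 2) = 16KiB and the extent threshold is
 * 16KiB / sizeof(struct btrfs_free_space).
 */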
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}
static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
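/*
 * Note (inferred from the caller): -EAGAIN from the helper below means
 * the range being removed continues past this bitmap into an entry that
 * isn't a bitmap, so btrfs_remove_free_space() must re-search and take
 * the extent path; -EINVAL signals an inconsistency between the bitmap
 * contents and the requested range.
 */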
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}
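/*
 * Worked example for use_bitmap() below (illustrative): with a 4KiB
 * sectorsize the "small extent" cutoff is 16KiB -- while fewer than
 * half of the extent slots are used, extents of 16KiB or less still get
 * individual entries instead of being pushed into a bitmap.
 */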
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}
static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	}
	goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
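/*
 * Note on the update_stat parameter used below (derived from the code):
 * it selects the unlink_free_space()/bitmap_clear_bits() variants that
 * also adjust ctl->free_space; callers pass false when space is merely
 * being moved between entries, so the running total stays untouched.
 */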
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space :%d", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				ret = 0;
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
*block_group
,
2437 struct btrfs_fs_info
*fs_info
= block_group
->fs_info
;
2438 struct btrfs_free_space_ctl
*ctl
= block_group
->free_space_ctl
;
2439 struct btrfs_free_space
*info
;
2443 for (n
= rb_first(&ctl
->free_space_offset
); n
; n
= rb_next(n
)) {
2444 info
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
2445 if (info
->bytes
>= bytes
&& !block_group
->ro
)
2447 btrfs_crit(fs_info
, "entry offset %llu, bytes %llu, bitmap %s",
2448 info
->offset
, info
->bytes
,
2449 (info
->bitmap
) ? "yes" : "no");
2451 btrfs_info(fs_info
, "block group has cluster?: %s",
2452 list_empty(&block_group
->cluster_list
) ? "no" : "yes");
2454 "%d blocks of free space at or bigger than bytes is", count
);
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}
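/*
 * Worked example (sizes are illustrative; sizeof(struct btrfs_free_space)
 * depends on the build): on a 64-bit kernel the entry struct is on the
 * order of 40-50 bytes, so (SZ_32K / 2) / sizeof(...) gives an
 * extents_thresh of roughly 300-400 extent entries before the cache
 * starts converting runs of small extents into bitmaps.
 */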
/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);

}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len);
	return ret;
}
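/*
 * Usage sketch (illustrative, not from the original source): the extent
 * allocator calls this with the desired size and, on failure, learns the
 * largest extent still available so it can skip this block group early:
 *
 *	u64 max_extent_size = 0;
 *	u64 start = btrfs_find_space_for_alloc(block_group, search_start,
 *					       num_bytes, empty_size,
 *					       &max_extent_size);
 *	if (!start && max_extent_size < num_bytes)
 *		... move on to the next block group ...
 */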
/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (!ret) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
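/*
 * Usage sketch (illustrative): callers hold cluster->refill_lock around
 * refilling and allocating from a cluster, roughly:
 *
 *	spin_lock(&cluster->refill_lock);
 *	offset = btrfs_alloc_from_cluster(block_group, cluster, num_bytes,
 *					  min_start, &max_extent_size);
 *	spin_unlock(&cluster->refill_lock);
 *
 * Note that ctl->free_space is only decremented here, at allocation time;
 * it was never subtracted when the entry moved into the cluster's private
 * rbtree, since the space was still free, just reserved for the cluster.
 */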
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
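/*
 * Worked example (illustrative): with ctl->unit == 4K, a 64K window is
 * want_bits = 16 bits. If the scan finds qualifying runs of 10 and then
 * 8 set bits, total_found reaches 18 >= want_bits, but the cluster is
 * only set up if cluster->max_size (10 * 4K = 40K here) has also reached
 * cont1_bytes; otherwise the loop keeps scanning from next_zero + 1.
 */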
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster. For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents. For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
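/*
 * Worked example of the sizing policy above (illustrative): for a 1M data
 * allocation with empty_size == 256K and no SSD_SPREAD, cont1_bytes =
 * max(1M, (1M + 256K) >> 2) = max(1M, 320K) = 1M and min_bytes is one
 * sector, so the cluster must contain a single 1M contiguous run but may
 * pad the rest of the window with sector-sized pieces.
 */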
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
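/*
 * Note on the pattern above: the range being discarded is temporarily
 * accounted as reserved so the allocator cannot hand it out while the
 * discard is in flight, and the btrfs_trim_range linked into
 * ctl->trimming_ranges lets the cache writeout code see ranges that are
 * free but must not yet be recorded in the on-disk free space cache.
 */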
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
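/*
 * Worked example (illustrative): with 4K pages and ctl->unit == 4K, one
 * bitmap covers BITS_PER_BITMAP * ctl->unit = 32768 * 4K = 128M, so the
 * "offset += BITS_PER_BITMAP * ctl->unit" steps above advance the scan
 * one 128M bitmap window at a time.
 */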
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->trimming);
}
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		/*
		 * remove_extent_mapping() will delete us from the pinned_chunks
		 * list, which is protected by the chunk mutex.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}
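/*
 * Usage sketch (illustrative; variable names are hypothetical): the
 * FITRIM path iterates block groups and calls this once per group,
 * accumulating the total:
 *
 *	u64 group_trimmed = 0;
 *	ret = btrfs_trim_block_group(block_group, &group_trimmed,
 *				     start, end, range->minlen);
 *	trimmed += group_trimmed;
 *
 * See btrfs_trim_fs() for the real caller.
 */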
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
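/*
 * Note: this feeds the optional inode number cache, whose ctl uses a
 * unit of 1, so the extent-entry branch above claims exactly one inode
 * number by shrinking the entry from the front (offset++, bytes--),
 * mirroring at single-number granularity what
 * btrfs_find_space_for_alloc() does for real disk space.
 */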
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					inode->i_size, true);
		btrfs_err(fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
	}

	return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kfree(map);
	return 0;
}
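/*
 * Usage sketch (illustrative): the sanity tests use this to build exact
 * layouts, e.g. an extent entry followed by a bitmap entry in the same
 * block group:
 *
 *	ret = test_add_free_space_entry(cache, 0, SZ_4M, 0);
 *	if (!ret)
 *		ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1);
 */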
/*
 * Checks to see if the given range is in the free space cache. This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */