/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K
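/*
 * Rough sketch of the sizing above (an added note, not from the original
 * comments): each bitmap entry is backed by a single page, so one bitmap
 * holds PAGE_SIZE * 8 bits and, at one bit per ctl->unit bytes, covers
 * 32768 * 4KiB = 128MiB of space on a 4KiB-sectorsize filesystem.
 * MAX_CACHE_BYTES_PER_GIG caps the in-memory tracking overhead at roughly
 * 32KiB per 1GiB of block group space (see recalculate_thresholds()).
 */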
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	/*
	 * Drop __GFP_FS so page cache allocations for the cache file cannot
	 * recurse back into the filesystem, and avoid highmem pages since
	 * the cache pages are accessed through page_address().
	 */
	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}
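/*
 * Wrapper used by the block group code: return the cached inode pointer if
 * the block group already holds a reference, otherwise look the inode up in
 * the tree root.  Old style space inodes (without NODATASUM | NODATACOW)
 * are flagged for conversion and the block group is marked BTRFS_DC_CLEAR
 * so the on-disk cache gets rewritten.
 */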
98 struct inode
*lookup_free_space_inode(struct btrfs_fs_info
*fs_info
,
99 struct btrfs_block_group_cache
100 *block_group
, struct btrfs_path
*path
)
102 struct inode
*inode
= NULL
;
103 u32 flags
= BTRFS_INODE_NODATASUM
| BTRFS_INODE_NODATACOW
;
105 spin_lock(&block_group
->lock
);
106 if (block_group
->inode
)
107 inode
= igrab(block_group
->inode
);
108 spin_unlock(&block_group
->lock
);
112 inode
= __lookup_free_space_inode(fs_info
->tree_root
, path
,
113 block_group
->key
.objectid
);
117 spin_lock(&block_group
->lock
);
118 if (!((BTRFS_I(inode
)->flags
& flags
) == flags
)) {
119 btrfs_info(fs_info
, "Old style space inode found, converting.");
120 BTRFS_I(inode
)->flags
|= BTRFS_INODE_NODATASUM
|
121 BTRFS_INODE_NODATACOW
;
122 block_group
->disk_cache_state
= BTRFS_DC_CLEAR
;
125 if (!block_group
->iref
) {
126 block_group
->inode
= igrab(inode
);
127 block_group
->iref
= 1;
129 spin_unlock(&block_group
->lock
);
134 static int __create_free_space_inode(struct btrfs_root
*root
,
135 struct btrfs_trans_handle
*trans
,
136 struct btrfs_path
*path
,
139 struct btrfs_key key
;
140 struct btrfs_disk_key disk_key
;
141 struct btrfs_free_space_header
*header
;
142 struct btrfs_inode_item
*inode_item
;
143 struct extent_buffer
*leaf
;
144 u64 flags
= BTRFS_INODE_NOCOMPRESS
| BTRFS_INODE_PREALLOC
;
147 ret
= btrfs_insert_empty_inode(trans
, root
, path
, ino
);
151 /* We inline crc's for the free disk space cache */
152 if (ino
!= BTRFS_FREE_INO_OBJECTID
)
153 flags
|= BTRFS_INODE_NODATASUM
| BTRFS_INODE_NODATACOW
;
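	/*
	 * Block group caches carry their own inline crcs (one u32 per cache
	 * page), so data checksums and COW are switched off for them; the
	 * free-ino cache (BTRFS_FREE_INO_OBJECTID) keeps the defaults.
	 */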
155 leaf
= path
->nodes
[0];
156 inode_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
157 struct btrfs_inode_item
);
158 btrfs_item_key(leaf
, &disk_key
, path
->slots
[0]);
159 memzero_extent_buffer(leaf
, (unsigned long)inode_item
,
160 sizeof(*inode_item
));
161 btrfs_set_inode_generation(leaf
, inode_item
, trans
->transid
);
162 btrfs_set_inode_size(leaf
, inode_item
, 0);
163 btrfs_set_inode_nbytes(leaf
, inode_item
, 0);
164 btrfs_set_inode_uid(leaf
, inode_item
, 0);
165 btrfs_set_inode_gid(leaf
, inode_item
, 0);
166 btrfs_set_inode_mode(leaf
, inode_item
, S_IFREG
| 0600);
167 btrfs_set_inode_flags(leaf
, inode_item
, flags
);
168 btrfs_set_inode_nlink(leaf
, inode_item
, 1);
169 btrfs_set_inode_transid(leaf
, inode_item
, trans
->transid
);
170 btrfs_set_inode_block_group(leaf
, inode_item
, offset
);
171 btrfs_mark_buffer_dirty(leaf
);
172 btrfs_release_path(path
);
174 key
.objectid
= BTRFS_FREE_SPACE_OBJECTID
;
177 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
178 sizeof(struct btrfs_free_space_header
));
180 btrfs_release_path(path
);
184 leaf
= path
->nodes
[0];
185 header
= btrfs_item_ptr(leaf
, path
->slots
[0],
186 struct btrfs_free_space_header
);
187 memzero_extent_buffer(leaf
, (unsigned long)header
, sizeof(*header
));
188 btrfs_set_free_space_key(leaf
, header
, &disk_key
);
189 btrfs_mark_buffer_dirty(leaf
);
190 btrfs_release_path(path
);
195 int create_free_space_inode(struct btrfs_fs_info
*fs_info
,
196 struct btrfs_trans_handle
*trans
,
197 struct btrfs_block_group_cache
*block_group
,
198 struct btrfs_path
*path
)
203 ret
= btrfs_find_free_objectid(fs_info
->tree_root
, &ino
);
207 return __create_free_space_inode(fs_info
->tree_root
, trans
, path
, ino
,
208 block_group
->key
.objectid
);
211 int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info
*fs_info
,
212 struct btrfs_block_rsv
*rsv
)
217 /* 1 for slack space, 1 for updating the inode */
218 needed_bytes
= btrfs_calc_trunc_metadata_size(fs_info
, 1) +
219 btrfs_calc_trans_metadata_size(fs_info
, 1);
221 spin_lock(&rsv
->lock
);
222 if (rsv
->reserved
< needed_bytes
)
226 spin_unlock(&rsv
->lock
);
230 int btrfs_truncate_free_space_cache(struct btrfs_trans_handle
*trans
,
231 struct btrfs_block_group_cache
*block_group
,
234 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
239 struct btrfs_path
*path
= btrfs_alloc_path();
246 mutex_lock(&trans
->transaction
->cache_write_mutex
);
247 if (!list_empty(&block_group
->io_list
)) {
248 list_del_init(&block_group
->io_list
);
250 btrfs_wait_cache_io(trans
, block_group
, path
);
251 btrfs_put_block_group(block_group
);
	 * now that we've truncated the cache away, it's no longer
258 spin_lock(&block_group
->lock
);
259 block_group
->disk_cache_state
= BTRFS_DC_CLEAR
;
260 spin_unlock(&block_group
->lock
);
261 btrfs_free_path(path
);
264 btrfs_i_size_write(BTRFS_I(inode
), 0);
265 truncate_pagecache(inode
, 0);
268 * We don't need an orphan item because truncating the free space cache
269 * will never be split across transactions.
270 * We don't need to check for -EAGAIN because we're a free space
273 ret
= btrfs_truncate_inode_items(trans
, root
, inode
,
274 0, BTRFS_EXTENT_DATA_KEY
);
278 ret
= btrfs_update_inode(trans
, root
, inode
);
282 mutex_unlock(&trans
->transaction
->cache_write_mutex
);
284 btrfs_abort_transaction(trans
, ret
);
static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}
306 static int io_ctl_init(struct btrfs_io_ctl
*io_ctl
, struct inode
*inode
,
312 num_pages
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
314 if (btrfs_ino(BTRFS_I(inode
)) != BTRFS_FREE_INO_OBJECTID
)
317 /* Make sure we can fit our crcs into the first page */
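	/*
	 * (Arithmetic note, an added estimate based on the check below: one
	 * u32 crc per page lives at the head of the first page, so a cache
	 * file is limited to PAGE_SIZE / sizeof(u32) pages -- about 4MiB of
	 * cache pages with 4KiB pages.)
	 */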
318 if (write
&& check_crcs
&&
319 (num_pages
* sizeof(u32
)) >= PAGE_SIZE
)
322 memset(io_ctl
, 0, sizeof(struct btrfs_io_ctl
));
324 io_ctl
->pages
= kcalloc(num_pages
, sizeof(struct page
*), GFP_NOFS
);
328 io_ctl
->num_pages
= num_pages
;
329 io_ctl
->fs_info
= btrfs_sb(inode
->i_sb
);
330 io_ctl
->check_crcs
= check_crcs
;
331 io_ctl
->inode
= inode
;
static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}
static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}
static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}
361 static void io_ctl_drop_pages(struct btrfs_io_ctl
*io_ctl
)
365 io_ctl_unmap_page(io_ctl
);
367 for (i
= 0; i
< io_ctl
->num_pages
; i
++) {
368 if (io_ctl
->pages
[i
]) {
369 ClearPageChecked(io_ctl
->pages
[i
]);
370 unlock_page(io_ctl
->pages
[i
]);
371 put_page(io_ctl
->pages
[i
]);
376 static int io_ctl_prepare_pages(struct btrfs_io_ctl
*io_ctl
, struct inode
*inode
,
380 gfp_t mask
= btrfs_alloc_write_mask(inode
->i_mapping
);
383 for (i
= 0; i
< io_ctl
->num_pages
; i
++) {
384 page
= find_or_create_page(inode
->i_mapping
, i
, mask
);
386 io_ctl_drop_pages(io_ctl
);
389 io_ctl
->pages
[i
] = page
;
390 if (uptodate
&& !PageUptodate(page
)) {
391 btrfs_readpage(NULL
, page
);
393 if (!PageUptodate(page
)) {
394 btrfs_err(BTRFS_I(inode
)->root
->fs_info
,
395 "error reading free space cache");
396 io_ctl_drop_pages(io_ctl
);
402 for (i
= 0; i
< io_ctl
->num_pages
; i
++) {
403 clear_page_dirty_for_io(io_ctl
->pages
[i
]);
404 set_page_extent_mapped(io_ctl
->pages
[i
]);
410 static void io_ctl_set_generation(struct btrfs_io_ctl
*io_ctl
, u64 generation
)
414 io_ctl_map_page(io_ctl
, 1);
417 * Skip the csum areas. If we don't check crcs then we just have a
418 * 64bit chunk at the front of the first page.
420 if (io_ctl
->check_crcs
) {
421 io_ctl
->cur
+= (sizeof(u32
) * io_ctl
->num_pages
);
422 io_ctl
->size
-= sizeof(u64
) + (sizeof(u32
) * io_ctl
->num_pages
);
424 io_ctl
->cur
+= sizeof(u64
);
425 io_ctl
->size
-= sizeof(u64
) * 2;
429 *val
= cpu_to_le64(generation
);
430 io_ctl
->cur
+= sizeof(u64
);
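/*
 * On-disk layout of the first cache page when crcs are enabled (as set up
 * above and mirrored in io_ctl_check_generation() below): an array of one
 * u32 crc per page, then a u64 generation, then the free space entries.
 * Without crcs, only the u64 generation precedes the entries.
 */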
433 static int io_ctl_check_generation(struct btrfs_io_ctl
*io_ctl
, u64 generation
)
438 * Skip the crc area. If we don't check crcs then we just have a 64bit
439 * chunk at the front of the first page.
441 if (io_ctl
->check_crcs
) {
442 io_ctl
->cur
+= sizeof(u32
) * io_ctl
->num_pages
;
443 io_ctl
->size
-= sizeof(u64
) +
444 (sizeof(u32
) * io_ctl
->num_pages
);
446 io_ctl
->cur
+= sizeof(u64
);
447 io_ctl
->size
-= sizeof(u64
) * 2;
451 if (le64_to_cpu(*gen
) != generation
) {
452 btrfs_err_rl(io_ctl
->fs_info
,
453 "space cache generation (%llu) does not match inode (%llu)",
455 io_ctl_unmap_page(io_ctl
);
458 io_ctl
->cur
+= sizeof(u64
);
462 static void io_ctl_set_crc(struct btrfs_io_ctl
*io_ctl
, int index
)
468 if (!io_ctl
->check_crcs
) {
469 io_ctl_unmap_page(io_ctl
);
474 offset
= sizeof(u32
) * io_ctl
->num_pages
;
476 crc
= btrfs_csum_data(io_ctl
->orig
+ offset
, crc
,
478 btrfs_csum_final(crc
, (u8
*)&crc
);
479 io_ctl_unmap_page(io_ctl
);
480 tmp
= page_address(io_ctl
->pages
[0]);
485 static int io_ctl_check_crc(struct btrfs_io_ctl
*io_ctl
, int index
)
491 if (!io_ctl
->check_crcs
) {
492 io_ctl_map_page(io_ctl
, 0);
497 offset
= sizeof(u32
) * io_ctl
->num_pages
;
499 tmp
= page_address(io_ctl
->pages
[0]);
503 io_ctl_map_page(io_ctl
, 0);
504 crc
= btrfs_csum_data(io_ctl
->orig
+ offset
, crc
,
506 btrfs_csum_final(crc
, (u8
*)&crc
);
508 btrfs_err_rl(io_ctl
->fs_info
,
509 "csum mismatch on free space cache");
510 io_ctl_unmap_page(io_ctl
);
517 static int io_ctl_add_entry(struct btrfs_io_ctl
*io_ctl
, u64 offset
, u64 bytes
,
520 struct btrfs_free_space_entry
*entry
;
526 entry
->offset
= cpu_to_le64(offset
);
527 entry
->bytes
= cpu_to_le64(bytes
);
528 entry
->type
= (bitmap
) ? BTRFS_FREE_SPACE_BITMAP
:
529 BTRFS_FREE_SPACE_EXTENT
;
530 io_ctl
->cur
+= sizeof(struct btrfs_free_space_entry
);
531 io_ctl
->size
-= sizeof(struct btrfs_free_space_entry
);
533 if (io_ctl
->size
>= sizeof(struct btrfs_free_space_entry
))
536 io_ctl_set_crc(io_ctl
, io_ctl
->index
- 1);
538 /* No more pages to map */
539 if (io_ctl
->index
>= io_ctl
->num_pages
)
542 /* map the next page */
543 io_ctl_map_page(io_ctl
, 1);
547 static int io_ctl_add_bitmap(struct btrfs_io_ctl
*io_ctl
, void *bitmap
)
553 * If we aren't at the start of the current page, unmap this one and
554 * map the next one if there is any left.
556 if (io_ctl
->cur
!= io_ctl
->orig
) {
557 io_ctl_set_crc(io_ctl
, io_ctl
->index
- 1);
558 if (io_ctl
->index
>= io_ctl
->num_pages
)
560 io_ctl_map_page(io_ctl
, 0);
563 memcpy(io_ctl
->cur
, bitmap
, PAGE_SIZE
);
564 io_ctl_set_crc(io_ctl
, io_ctl
->index
- 1);
565 if (io_ctl
->index
< io_ctl
->num_pages
)
566 io_ctl_map_page(io_ctl
, 0);
570 static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl
*io_ctl
)
573 * If we're not on the boundary we know we've modified the page and we
574 * need to crc the page.
576 if (io_ctl
->cur
!= io_ctl
->orig
)
577 io_ctl_set_crc(io_ctl
, io_ctl
->index
- 1);
579 io_ctl_unmap_page(io_ctl
);
581 while (io_ctl
->index
< io_ctl
->num_pages
) {
582 io_ctl_map_page(io_ctl
, 1);
583 io_ctl_set_crc(io_ctl
, io_ctl
->index
- 1);
587 static int io_ctl_read_entry(struct btrfs_io_ctl
*io_ctl
,
588 struct btrfs_free_space
*entry
, u8
*type
)
590 struct btrfs_free_space_entry
*e
;
594 ret
= io_ctl_check_crc(io_ctl
, io_ctl
->index
);
600 entry
->offset
= le64_to_cpu(e
->offset
);
601 entry
->bytes
= le64_to_cpu(e
->bytes
);
603 io_ctl
->cur
+= sizeof(struct btrfs_free_space_entry
);
604 io_ctl
->size
-= sizeof(struct btrfs_free_space_entry
);
606 if (io_ctl
->size
>= sizeof(struct btrfs_free_space_entry
))
609 io_ctl_unmap_page(io_ctl
);
614 static int io_ctl_read_bitmap(struct btrfs_io_ctl
*io_ctl
,
615 struct btrfs_free_space
*entry
)
619 ret
= io_ctl_check_crc(io_ctl
, io_ctl
->index
);
623 memcpy(entry
->bitmap
, io_ctl
->cur
, PAGE_SIZE
);
624 io_ctl_unmap_page(io_ctl
);
/*
 * Since we attach pinned extents after the fact, we can end up with
 * contiguous sections of free space that are split up into multiple entries.
 * This poses a problem for tree logging: the log could have allocated across
 * what appears to be two entries, because had the pinned extents been added
 * before the cache was written those entries would have been merged into one.
 * So run through the space cache we just loaded and merge contiguous entries.
 * This keeps log replay from blowing up and makes for nicer allocator
 * behavior.
 */
638 static void merge_space_tree(struct btrfs_free_space_ctl
*ctl
)
640 struct btrfs_free_space
*e
, *prev
= NULL
;
644 spin_lock(&ctl
->tree_lock
);
645 for (n
= rb_first(&ctl
->free_space_offset
); n
; n
= rb_next(n
)) {
646 e
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
649 if (e
->bitmap
|| prev
->bitmap
)
651 if (prev
->offset
+ prev
->bytes
== e
->offset
) {
652 unlink_free_space(ctl
, prev
);
653 unlink_free_space(ctl
, e
);
654 prev
->bytes
+= e
->bytes
;
655 kmem_cache_free(btrfs_free_space_cachep
, e
);
656 link_free_space(ctl
, prev
);
658 spin_unlock(&ctl
->tree_lock
);
664 spin_unlock(&ctl
->tree_lock
);
667 static int __load_free_space_cache(struct btrfs_root
*root
, struct inode
*inode
,
668 struct btrfs_free_space_ctl
*ctl
,
669 struct btrfs_path
*path
, u64 offset
)
671 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
672 struct btrfs_free_space_header
*header
;
673 struct extent_buffer
*leaf
;
674 struct btrfs_io_ctl io_ctl
;
675 struct btrfs_key key
;
676 struct btrfs_free_space
*e
, *n
;
684 /* Nothing in the space cache, goodbye */
685 if (!i_size_read(inode
))
688 key
.objectid
= BTRFS_FREE_SPACE_OBJECTID
;
692 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
696 btrfs_release_path(path
);
702 leaf
= path
->nodes
[0];
703 header
= btrfs_item_ptr(leaf
, path
->slots
[0],
704 struct btrfs_free_space_header
);
705 num_entries
= btrfs_free_space_entries(leaf
, header
);
706 num_bitmaps
= btrfs_free_space_bitmaps(leaf
, header
);
707 generation
= btrfs_free_space_generation(leaf
, header
);
708 btrfs_release_path(path
);
710 if (!BTRFS_I(inode
)->generation
) {
712 "the free space cache file (%llu) is invalid, skip it",
717 if (BTRFS_I(inode
)->generation
!= generation
) {
719 "free space inode generation (%llu) did not match free space cache generation (%llu)",
720 BTRFS_I(inode
)->generation
, generation
);
727 ret
= io_ctl_init(&io_ctl
, inode
, 0);
731 readahead_cache(inode
);
733 ret
= io_ctl_prepare_pages(&io_ctl
, inode
, 1);
737 ret
= io_ctl_check_crc(&io_ctl
, 0);
741 ret
= io_ctl_check_generation(&io_ctl
, generation
);
745 while (num_entries
) {
746 e
= kmem_cache_zalloc(btrfs_free_space_cachep
,
751 ret
= io_ctl_read_entry(&io_ctl
, e
, &type
);
753 kmem_cache_free(btrfs_free_space_cachep
, e
);
758 kmem_cache_free(btrfs_free_space_cachep
, e
);
762 if (type
== BTRFS_FREE_SPACE_EXTENT
) {
763 spin_lock(&ctl
->tree_lock
);
764 ret
= link_free_space(ctl
, e
);
765 spin_unlock(&ctl
->tree_lock
);
768 "Duplicate entries in free space cache, dumping");
769 kmem_cache_free(btrfs_free_space_cachep
, e
);
775 e
->bitmap
= kzalloc(PAGE_SIZE
, GFP_NOFS
);
778 btrfs_free_space_cachep
, e
);
781 spin_lock(&ctl
->tree_lock
);
782 ret
= link_free_space(ctl
, e
);
783 ctl
->total_bitmaps
++;
784 ctl
->op
->recalc_thresholds(ctl
);
785 spin_unlock(&ctl
->tree_lock
);
788 "Duplicate entries in free space cache, dumping");
789 kmem_cache_free(btrfs_free_space_cachep
, e
);
792 list_add_tail(&e
->list
, &bitmaps
);
798 io_ctl_unmap_page(&io_ctl
);
801 * We add the bitmaps at the end of the entries in order that
802 * the bitmap entries are added to the cache.
804 list_for_each_entry_safe(e
, n
, &bitmaps
, list
) {
805 list_del_init(&e
->list
);
806 ret
= io_ctl_read_bitmap(&io_ctl
, e
);
811 io_ctl_drop_pages(&io_ctl
);
812 merge_space_tree(ctl
);
815 io_ctl_free(&io_ctl
);
818 io_ctl_drop_pages(&io_ctl
);
819 __btrfs_remove_free_space_cache(ctl
);
823 int load_free_space_cache(struct btrfs_fs_info
*fs_info
,
824 struct btrfs_block_group_cache
*block_group
)
826 struct btrfs_free_space_ctl
*ctl
= block_group
->free_space_ctl
;
828 struct btrfs_path
*path
;
831 u64 used
= btrfs_block_group_used(&block_group
->item
);
834 * If this block group has been marked to be cleared for one reason or
835 * another then we can't trust the on disk cache, so just return.
837 spin_lock(&block_group
->lock
);
838 if (block_group
->disk_cache_state
!= BTRFS_DC_WRITTEN
) {
839 spin_unlock(&block_group
->lock
);
842 spin_unlock(&block_group
->lock
);
844 path
= btrfs_alloc_path();
847 path
->search_commit_root
= 1;
848 path
->skip_locking
= 1;
850 inode
= lookup_free_space_inode(fs_info
, block_group
, path
);
852 btrfs_free_path(path
);
856 /* We may have converted the inode and made the cache invalid. */
857 spin_lock(&block_group
->lock
);
858 if (block_group
->disk_cache_state
!= BTRFS_DC_WRITTEN
) {
859 spin_unlock(&block_group
->lock
);
860 btrfs_free_path(path
);
863 spin_unlock(&block_group
->lock
);
865 ret
= __load_free_space_cache(fs_info
->tree_root
, inode
, ctl
,
866 path
, block_group
->key
.objectid
);
867 btrfs_free_path(path
);
871 spin_lock(&ctl
->tree_lock
);
872 matched
= (ctl
->free_space
== (block_group
->key
.offset
- used
-
873 block_group
->bytes_super
));
874 spin_unlock(&ctl
->tree_lock
);
877 __btrfs_remove_free_space_cache(ctl
);
879 "block group %llu has wrong amount of free space",
880 block_group
->key
.objectid
);
885 /* This cache is bogus, make sure it gets cleared */
886 spin_lock(&block_group
->lock
);
887 block_group
->disk_cache_state
= BTRFS_DC_CLEAR
;
888 spin_unlock(&block_group
->lock
);
892 "failed to load free space cache for block group %llu, rebuilding it now",
893 block_group
->key
.objectid
);
900 static noinline_for_stack
901 int write_cache_extent_entries(struct btrfs_io_ctl
*io_ctl
,
902 struct btrfs_free_space_ctl
*ctl
,
903 struct btrfs_block_group_cache
*block_group
,
904 int *entries
, int *bitmaps
,
905 struct list_head
*bitmap_list
)
908 struct btrfs_free_cluster
*cluster
= NULL
;
909 struct btrfs_free_cluster
*cluster_locked
= NULL
;
910 struct rb_node
*node
= rb_first(&ctl
->free_space_offset
);
911 struct btrfs_trim_range
*trim_entry
;
913 /* Get the cluster for this block_group if it exists */
914 if (block_group
&& !list_empty(&block_group
->cluster_list
)) {
915 cluster
= list_entry(block_group
->cluster_list
.next
,
916 struct btrfs_free_cluster
,
920 if (!node
&& cluster
) {
921 cluster_locked
= cluster
;
922 spin_lock(&cluster_locked
->lock
);
923 node
= rb_first(&cluster
->root
);
927 /* Write out the extent entries */
929 struct btrfs_free_space
*e
;
931 e
= rb_entry(node
, struct btrfs_free_space
, offset_index
);
934 ret
= io_ctl_add_entry(io_ctl
, e
->offset
, e
->bytes
,
940 list_add_tail(&e
->list
, bitmap_list
);
943 node
= rb_next(node
);
944 if (!node
&& cluster
) {
945 node
= rb_first(&cluster
->root
);
946 cluster_locked
= cluster
;
947 spin_lock(&cluster_locked
->lock
);
951 if (cluster_locked
) {
952 spin_unlock(&cluster_locked
->lock
);
953 cluster_locked
= NULL
;
957 * Make sure we don't miss any range that was removed from our rbtree
958 * because trimming is running. Otherwise after a umount+mount (or crash
959 * after committing the transaction) we would leak free space and get
960 * an inconsistent free space cache report from fsck.
962 list_for_each_entry(trim_entry
, &ctl
->trimming_ranges
, list
) {
963 ret
= io_ctl_add_entry(io_ctl
, trim_entry
->start
,
964 trim_entry
->bytes
, NULL
);
973 spin_unlock(&cluster_locked
->lock
);
977 static noinline_for_stack
int
978 update_cache_item(struct btrfs_trans_handle
*trans
,
979 struct btrfs_root
*root
,
981 struct btrfs_path
*path
, u64 offset
,
982 int entries
, int bitmaps
)
984 struct btrfs_key key
;
985 struct btrfs_free_space_header
*header
;
986 struct extent_buffer
*leaf
;
989 key
.objectid
= BTRFS_FREE_SPACE_OBJECTID
;
993 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
995 clear_extent_bit(&BTRFS_I(inode
)->io_tree
, 0, inode
->i_size
- 1,
996 EXTENT_DIRTY
| EXTENT_DELALLOC
, 0, 0, NULL
,
1000 leaf
= path
->nodes
[0];
1002 struct btrfs_key found_key
;
1003 ASSERT(path
->slots
[0]);
1005 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
1006 if (found_key
.objectid
!= BTRFS_FREE_SPACE_OBJECTID
||
1007 found_key
.offset
!= offset
) {
1008 clear_extent_bit(&BTRFS_I(inode
)->io_tree
, 0,
1010 EXTENT_DIRTY
| EXTENT_DELALLOC
, 0, 0,
1012 btrfs_release_path(path
);
1017 BTRFS_I(inode
)->generation
= trans
->transid
;
1018 header
= btrfs_item_ptr(leaf
, path
->slots
[0],
1019 struct btrfs_free_space_header
);
1020 btrfs_set_free_space_entries(leaf
, header
, entries
);
1021 btrfs_set_free_space_bitmaps(leaf
, header
, bitmaps
);
1022 btrfs_set_free_space_generation(leaf
, header
, trans
->transid
);
1023 btrfs_mark_buffer_dirty(leaf
);
1024 btrfs_release_path(path
);
1032 static noinline_for_stack
int
1033 write_pinned_extent_entries(struct btrfs_fs_info
*fs_info
,
1034 struct btrfs_block_group_cache
*block_group
,
1035 struct btrfs_io_ctl
*io_ctl
,
1038 u64 start
, extent_start
, extent_end
, len
;
1039 struct extent_io_tree
*unpin
= NULL
;
1046 * We want to add any pinned extents to our free space cache
1047 * so we don't leak the space
1049 * We shouldn't have switched the pinned extents yet so this is the
1052 unpin
= fs_info
->pinned_extents
;
1054 start
= block_group
->key
.objectid
;
1056 while (start
< block_group
->key
.objectid
+ block_group
->key
.offset
) {
1057 ret
= find_first_extent_bit(unpin
, start
,
1058 &extent_start
, &extent_end
,
1059 EXTENT_DIRTY
, NULL
);
1063 /* This pinned extent is out of our range */
1064 if (extent_start
>= block_group
->key
.objectid
+
1065 block_group
->key
.offset
)
1068 extent_start
= max(extent_start
, start
);
1069 extent_end
= min(block_group
->key
.objectid
+
1070 block_group
->key
.offset
, extent_end
+ 1);
1071 len
= extent_end
- extent_start
;
1074 ret
= io_ctl_add_entry(io_ctl
, extent_start
, len
, NULL
);
1084 static noinline_for_stack
int
1085 write_bitmap_entries(struct btrfs_io_ctl
*io_ctl
, struct list_head
*bitmap_list
)
1087 struct btrfs_free_space
*entry
, *next
;
1090 /* Write out the bitmaps */
1091 list_for_each_entry_safe(entry
, next
, bitmap_list
, list
) {
1092 ret
= io_ctl_add_bitmap(io_ctl
, entry
->bitmap
);
1095 list_del_init(&entry
->list
);
1101 static int flush_dirty_cache(struct inode
*inode
)
1105 ret
= btrfs_wait_ordered_range(inode
, 0, (u64
)-1);
1107 clear_extent_bit(&BTRFS_I(inode
)->io_tree
, 0, inode
->i_size
- 1,
1108 EXTENT_DIRTY
| EXTENT_DELALLOC
, 0, 0, NULL
,
static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}
1134 static int __btrfs_wait_cache_io(struct btrfs_root
*root
,
1135 struct btrfs_trans_handle
*trans
,
1136 struct btrfs_block_group_cache
*block_group
,
1137 struct btrfs_io_ctl
*io_ctl
,
1138 struct btrfs_path
*path
, u64 offset
)
1141 struct inode
*inode
= io_ctl
->inode
;
1142 struct btrfs_fs_info
*fs_info
;
1147 fs_info
= btrfs_sb(inode
->i_sb
);
1149 /* Flush the dirty pages in the cache file. */
1150 ret
= flush_dirty_cache(inode
);
1154 /* Update the cache item to tell everyone this cache file is valid. */
1155 ret
= update_cache_item(trans
, root
, inode
, path
, offset
,
1156 io_ctl
->entries
, io_ctl
->bitmaps
);
1158 io_ctl_free(io_ctl
);
1160 invalidate_inode_pages2(inode
->i_mapping
);
1161 BTRFS_I(inode
)->generation
= 0;
1165 "failed to write free space cache for block group %llu",
1166 block_group
->key
.objectid
);
1170 btrfs_update_inode(trans
, root
, inode
);
1173 /* the dirty list is protected by the dirty_bgs_lock */
1174 spin_lock(&trans
->transaction
->dirty_bgs_lock
);
1176 /* the disk_cache_state is protected by the block group lock */
1177 spin_lock(&block_group
->lock
);
1180 * only mark this as written if we didn't get put back on
1181 * the dirty list while waiting for IO. Otherwise our
1182 * cache state won't be right, and we won't get written again
1184 if (!ret
&& list_empty(&block_group
->dirty_list
))
1185 block_group
->disk_cache_state
= BTRFS_DC_WRITTEN
;
1187 block_group
->disk_cache_state
= BTRFS_DC_ERROR
;
1189 spin_unlock(&block_group
->lock
);
1190 spin_unlock(&trans
->transaction
->dirty_bgs_lock
);
1191 io_ctl
->inode
= NULL
;
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}
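/*
 * Cache write-out is two-phase: __btrfs_write_out_cache() below dirties the
 * cache pages and kicks off writeback, and the wait helpers above flush the
 * IO, update the free space header item and mark the block group
 * BTRFS_DC_WRITTEN (or BTRFS_DC_ERROR on failure).
 */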
/*
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the free space cache inode we are writing out
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl struct used to track this write
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, or an errno if it was not.
 */
1227 static int __btrfs_write_out_cache(struct btrfs_root
*root
, struct inode
*inode
,
1228 struct btrfs_free_space_ctl
*ctl
,
1229 struct btrfs_block_group_cache
*block_group
,
1230 struct btrfs_io_ctl
*io_ctl
,
1231 struct btrfs_trans_handle
*trans
)
1233 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1234 struct extent_state
*cached_state
= NULL
;
1235 LIST_HEAD(bitmap_list
);
1241 if (!i_size_read(inode
))
1244 WARN_ON(io_ctl
->pages
);
1245 ret
= io_ctl_init(io_ctl
, inode
, 1);
1249 if (block_group
&& (block_group
->flags
& BTRFS_BLOCK_GROUP_DATA
)) {
1250 down_write(&block_group
->data_rwsem
);
1251 spin_lock(&block_group
->lock
);
1252 if (block_group
->delalloc_bytes
) {
1253 block_group
->disk_cache_state
= BTRFS_DC_WRITTEN
;
1254 spin_unlock(&block_group
->lock
);
1255 up_write(&block_group
->data_rwsem
);
1256 BTRFS_I(inode
)->generation
= 0;
1261 spin_unlock(&block_group
->lock
);
1264 /* Lock all pages first so we can lock the extent safely. */
1265 ret
= io_ctl_prepare_pages(io_ctl
, inode
, 0);
1269 lock_extent_bits(&BTRFS_I(inode
)->io_tree
, 0, i_size_read(inode
) - 1,
1272 io_ctl_set_generation(io_ctl
, trans
->transid
);
1274 mutex_lock(&ctl
->cache_writeout_mutex
);
1275 /* Write out the extent entries in the free space cache */
1276 spin_lock(&ctl
->tree_lock
);
1277 ret
= write_cache_extent_entries(io_ctl
, ctl
,
1278 block_group
, &entries
, &bitmaps
,
1281 goto out_nospc_locked
;
1284 * Some spaces that are freed in the current transaction are pinned,
1285 * they will be added into free space cache after the transaction is
1286 * committed, we shouldn't lose them.
1288 * If this changes while we are working we'll get added back to
1289 * the dirty list and redo it. No locking needed
1291 ret
= write_pinned_extent_entries(fs_info
, block_group
,
1294 goto out_nospc_locked
;
1297 * At last, we write out all the bitmaps and keep cache_writeout_mutex
1298 * locked while doing it because a concurrent trim can be manipulating
1299 * or freeing the bitmap.
1301 ret
= write_bitmap_entries(io_ctl
, &bitmap_list
);
1302 spin_unlock(&ctl
->tree_lock
);
1303 mutex_unlock(&ctl
->cache_writeout_mutex
);
1307 /* Zero out the rest of the pages just to make sure */
1308 io_ctl_zero_remaining_pages(io_ctl
);
1310 /* Everything is written out, now we dirty the pages in the file. */
1311 ret
= btrfs_dirty_pages(inode
, io_ctl
->pages
, io_ctl
->num_pages
, 0,
1312 i_size_read(inode
), &cached_state
);
1316 if (block_group
&& (block_group
->flags
& BTRFS_BLOCK_GROUP_DATA
))
1317 up_write(&block_group
->data_rwsem
);
1319 * Release the pages and unlock the extent, we will flush
1322 io_ctl_drop_pages(io_ctl
);
1324 unlock_extent_cached(&BTRFS_I(inode
)->io_tree
, 0,
1325 i_size_read(inode
) - 1, &cached_state
, GFP_NOFS
);
	 * At this point the pages are under IO and we're happy.  The caller
	 * is responsible for waiting on them and updating the cache and the
	 * inode.
1332 io_ctl
->entries
= entries
;
1333 io_ctl
->bitmaps
= bitmaps
;
1335 ret
= btrfs_fdatawrite_range(inode
, 0, (u64
)-1);
1342 io_ctl
->inode
= NULL
;
1343 io_ctl_free(io_ctl
);
1345 invalidate_inode_pages2(inode
->i_mapping
);
1346 BTRFS_I(inode
)->generation
= 0;
1348 btrfs_update_inode(trans
, root
, inode
);
1354 cleanup_bitmap_list(&bitmap_list
);
1355 spin_unlock(&ctl
->tree_lock
);
1356 mutex_unlock(&ctl
->cache_writeout_mutex
);
1359 cleanup_write_cache_enospc(inode
, io_ctl
, &cached_state
);
1362 if (block_group
&& (block_group
->flags
& BTRFS_BLOCK_GROUP_DATA
))
1363 up_write(&block_group
->data_rwsem
);
1368 int btrfs_write_out_cache(struct btrfs_fs_info
*fs_info
,
1369 struct btrfs_trans_handle
*trans
,
1370 struct btrfs_block_group_cache
*block_group
,
1371 struct btrfs_path
*path
)
1373 struct btrfs_free_space_ctl
*ctl
= block_group
->free_space_ctl
;
1374 struct inode
*inode
;
1377 spin_lock(&block_group
->lock
);
1378 if (block_group
->disk_cache_state
< BTRFS_DC_SETUP
) {
1379 spin_unlock(&block_group
->lock
);
1382 spin_unlock(&block_group
->lock
);
1384 inode
= lookup_free_space_inode(fs_info
, block_group
, path
);
1388 ret
= __btrfs_write_out_cache(fs_info
->tree_root
, inode
, ctl
,
1389 block_group
, &block_group
->io_ctl
, trans
);
1393 "failed to write free space cache for block group %llu",
1394 block_group
->key
.objectid
);
1396 spin_lock(&block_group
->lock
);
1397 block_group
->disk_cache_state
= BTRFS_DC_ERROR
;
1398 spin_unlock(&block_group
->lock
);
1400 block_group
->io_ctl
.inode
= NULL
;
1405 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
1406 * to wait for IO and put the inode
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
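/*
 * Worked example (an added illustration, assuming 4KiB sectorsize and 4KiB
 * pages): ctl->unit is 4096 and BITS_PER_BITMAP is 32768, so each bitmap
 * window covers 128MiB.  offset_to_bitmap() rounds an offset down to the
 * start of the 128MiB window it falls in, relative to ctl->start.
 */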
1440 static int tree_insert_offset(struct rb_root
*root
, u64 offset
,
1441 struct rb_node
*node
, int bitmap
)
1443 struct rb_node
**p
= &root
->rb_node
;
1444 struct rb_node
*parent
= NULL
;
1445 struct btrfs_free_space
*info
;
1449 info
= rb_entry(parent
, struct btrfs_free_space
, offset_index
);
1451 if (offset
< info
->offset
) {
1453 } else if (offset
> info
->offset
) {
1454 p
= &(*p
)->rb_right
;
1457 * we could have a bitmap entry and an extent entry
1458 * share the same offset. If this is the case, we want
1459 * the extent entry to always be found first if we do a
1460 * linear search through the tree, since we want to have
1461 * the quickest allocation time, and allocating from an
1462 * extent is faster than allocating from a bitmap. So
1463 * if we're inserting a bitmap and we find an entry at
1464 * this offset, we want to go right, or after this entry
1465 * logically. If we are inserting an extent and we've
1466 * found a bitmap, we want to go left, or before
1474 p
= &(*p
)->rb_right
;
1476 if (!info
->bitmap
) {
1485 rb_link_node(node
, parent
, p
);
1486 rb_insert_color(node
, root
);
1492 * searches the tree for the given offset.
1494 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1495 * want a section that has at least bytes size and comes at or after the given
1498 static struct btrfs_free_space
*
1499 tree_search_offset(struct btrfs_free_space_ctl
*ctl
,
1500 u64 offset
, int bitmap_only
, int fuzzy
)
1502 struct rb_node
*n
= ctl
->free_space_offset
.rb_node
;
1503 struct btrfs_free_space
*entry
, *prev
= NULL
;
1505 /* find entry that is closest to the 'offset' */
1512 entry
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
1515 if (offset
< entry
->offset
)
1517 else if (offset
> entry
->offset
)
1530 * bitmap entry and extent entry may share same offset,
1531 * in that case, bitmap entry comes after extent entry.
1536 entry
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
1537 if (entry
->offset
!= offset
)
1540 WARN_ON(!entry
->bitmap
);
1543 if (entry
->bitmap
) {
1545 * if previous extent entry covers the offset,
1546 * we should return it instead of the bitmap entry
1548 n
= rb_prev(&entry
->offset_index
);
1550 prev
= rb_entry(n
, struct btrfs_free_space
,
1552 if (!prev
->bitmap
&&
1553 prev
->offset
+ prev
->bytes
> offset
)
1563 /* find last entry before the 'offset' */
1565 if (entry
->offset
> offset
) {
1566 n
= rb_prev(&entry
->offset_index
);
1568 entry
= rb_entry(n
, struct btrfs_free_space
,
1570 ASSERT(entry
->offset
<= offset
);
1579 if (entry
->bitmap
) {
1580 n
= rb_prev(&entry
->offset_index
);
1582 prev
= rb_entry(n
, struct btrfs_free_space
,
1584 if (!prev
->bitmap
&&
1585 prev
->offset
+ prev
->bytes
> offset
)
1588 if (entry
->offset
+ BITS_PER_BITMAP
* ctl
->unit
> offset
)
1590 } else if (entry
->offset
+ entry
->bytes
> offset
)
1597 if (entry
->bitmap
) {
1598 if (entry
->offset
+ BITS_PER_BITMAP
*
1602 if (entry
->offset
+ entry
->bytes
> offset
)
1606 n
= rb_next(&entry
->offset_index
);
1609 entry
= rb_entry(n
, struct btrfs_free_space
, offset_index
);
static inline void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
1645 static void recalculate_thresholds(struct btrfs_free_space_ctl
*ctl
)
1647 struct btrfs_block_group_cache
*block_group
= ctl
->private;
1651 u64 size
= block_group
->key
.offset
;
1652 u64 bytes_per_bg
= BITS_PER_BITMAP
* ctl
->unit
;
1653 u64 max_bitmaps
= div64_u64(size
+ bytes_per_bg
- 1, bytes_per_bg
);
1655 max_bitmaps
= max_t(u64
, max_bitmaps
, 1);
1657 ASSERT(ctl
->total_bitmaps
<= max_bitmaps
);
1660 * The goal is to keep the total amount of memory used per 1gb of space
1661 * at or below 32k, so we need to adjust how much memory we allow to be
1662 * used by extent based free space tracking
1665 max_bytes
= MAX_CACHE_BYTES_PER_GIG
;
1667 max_bytes
= MAX_CACHE_BYTES_PER_GIG
* div_u64(size
, SZ_1G
);
1670 * we want to account for 1 more bitmap than what we have so we can make
1671 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1672 * we add more bitmaps.
1674 bitmap_bytes
= (ctl
->total_bitmaps
+ 1) * ctl
->unit
;
1676 if (bitmap_bytes
>= max_bytes
) {
1677 ctl
->extents_thresh
= 0;
1682 * we want the extent entry threshold to always be at most 1/2 the max
1683 * bytes we can have, or whatever is less than that.
1685 extent_bytes
= max_bytes
- bitmap_bytes
;
1686 extent_bytes
= min_t(u64
, extent_bytes
, max_bytes
>> 1);
1688 ctl
->extents_thresh
=
1689 div_u64(extent_bytes
, sizeof(struct btrfs_free_space
));
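	/*
	 * Illustration of the math above (an added note, assuming sectorsize
	 * == PAGE_SIZE == 4KiB): a 1GiB block group gets a 32KiB budget; each
	 * existing bitmap (plus one for headroom) charges ctl->unit bytes
	 * against it, and the remainder, capped at half the budget and
	 * divided by the size of a free space entry, becomes extents_thresh.
	 */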
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
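/*
 * Note the split above: __bitmap_clear_bits() only touches the bitmap and
 * info->bytes, while bitmap_clear_bits() also subtracts from
 * ctl->free_space.  Callers that move space between entries without going
 * through link_free_space() (for example the steal_from_bitmap helpers with
 * update_stat == false) use the double-underscore variant so the overall
 * free space accounting is not disturbed.
 */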
1732 * If we can not find suitable extent, we will use bytes to record
1733 * the size of the max extent.
1735 static int search_bitmap(struct btrfs_free_space_ctl
*ctl
,
1736 struct btrfs_free_space
*bitmap_info
, u64
*offset
,
1737 u64
*bytes
, bool for_alloc
)
1739 unsigned long found_bits
= 0;
1740 unsigned long max_bits
= 0;
1741 unsigned long bits
, i
;
1742 unsigned long next_zero
;
1743 unsigned long extent_bits
;
1746 * Skip searching the bitmap if we don't have a contiguous section that
1747 * is large enough for this allocation.
1750 bitmap_info
->max_extent_size
&&
1751 bitmap_info
->max_extent_size
< *bytes
) {
1752 *bytes
= bitmap_info
->max_extent_size
;
1756 i
= offset_to_bit(bitmap_info
->offset
, ctl
->unit
,
1757 max_t(u64
, *offset
, bitmap_info
->offset
));
1758 bits
= bytes_to_bits(*bytes
, ctl
->unit
);
1760 for_each_set_bit_from(i
, bitmap_info
->bitmap
, BITS_PER_BITMAP
) {
1761 if (for_alloc
&& bits
== 1) {
1765 next_zero
= find_next_zero_bit(bitmap_info
->bitmap
,
1766 BITS_PER_BITMAP
, i
);
1767 extent_bits
= next_zero
- i
;
1768 if (extent_bits
>= bits
) {
1769 found_bits
= extent_bits
;
1771 } else if (extent_bits
> max_bits
) {
1772 max_bits
= extent_bits
;
1778 *offset
= (u64
)(i
* ctl
->unit
) + bitmap_info
->offset
;
1779 *bytes
= (u64
)(found_bits
) * ctl
->unit
;
1783 *bytes
= (u64
)(max_bits
) * ctl
->unit
;
1784 bitmap_info
->max_extent_size
= *bytes
;
1788 /* Cache the size of the max extent in bytes */
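/*
 * search_bitmap() above has a useful side effect: when it fails it returns
 * the largest contiguous run it saw via *bytes and caches that value in
 * bitmap_info->max_extent_size, so later allocations can skip bitmaps that
 * are already known to be too fragmented for the requested size.
 */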
1789 static struct btrfs_free_space
*
1790 find_free_space(struct btrfs_free_space_ctl
*ctl
, u64
*offset
, u64
*bytes
,
1791 unsigned long align
, u64
*max_extent_size
)
1793 struct btrfs_free_space
*entry
;
1794 struct rb_node
*node
;
1799 if (!ctl
->free_space_offset
.rb_node
)
1802 entry
= tree_search_offset(ctl
, offset_to_bitmap(ctl
, *offset
), 0, 1);
1806 for (node
= &entry
->offset_index
; node
; node
= rb_next(node
)) {
1807 entry
= rb_entry(node
, struct btrfs_free_space
, offset_index
);
1808 if (entry
->bytes
< *bytes
) {
1809 if (entry
->bytes
> *max_extent_size
)
1810 *max_extent_size
= entry
->bytes
;
1814 /* make sure the space returned is big enough
1815 * to match our requested alignment
1817 if (*bytes
>= align
) {
1818 tmp
= entry
->offset
- ctl
->start
+ align
- 1;
1819 tmp
= div64_u64(tmp
, align
);
1820 tmp
= tmp
* align
+ ctl
->start
;
1821 align_off
= tmp
- entry
->offset
;
1824 tmp
= entry
->offset
;
1827 if (entry
->bytes
< *bytes
+ align_off
) {
1828 if (entry
->bytes
> *max_extent_size
)
1829 *max_extent_size
= entry
->bytes
;
1833 if (entry
->bitmap
) {
1836 ret
= search_bitmap(ctl
, entry
, &tmp
, &size
, true);
1841 } else if (size
> *max_extent_size
) {
1842 *max_extent_size
= size
;
1848 *bytes
= entry
->bytes
- align_off
;
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
1877 static noinline
int remove_from_bitmap(struct btrfs_free_space_ctl
*ctl
,
1878 struct btrfs_free_space
*bitmap_info
,
1879 u64
*offset
, u64
*bytes
)
1882 u64 search_start
, search_bytes
;
1886 end
= bitmap_info
->offset
+ (u64
)(BITS_PER_BITMAP
* ctl
->unit
) - 1;
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
1894 search_start
= *offset
;
1895 search_bytes
= ctl
->unit
;
1896 search_bytes
= min(search_bytes
, end
- search_start
+ 1);
1897 ret
= search_bitmap(ctl
, bitmap_info
, &search_start
, &search_bytes
,
1899 if (ret
< 0 || search_start
!= *offset
)
1902 /* We may have found more bits than what we need */
1903 search_bytes
= min(search_bytes
, *bytes
);
1905 /* Cannot clear past the end of the bitmap */
1906 search_bytes
= min(search_bytes
, end
- search_start
+ 1);
1908 bitmap_clear_bits(ctl
, bitmap_info
, search_start
, search_bytes
);
1909 *offset
+= search_bytes
;
1910 *bytes
-= search_bytes
;
1913 struct rb_node
*next
= rb_next(&bitmap_info
->offset_index
);
1914 if (!bitmap_info
->bytes
)
1915 free_bitmap(ctl
, bitmap_info
);
1918 * no entry after this bitmap, but we still have bytes to
1919 * remove, so something has gone wrong.
1924 bitmap_info
= rb_entry(next
, struct btrfs_free_space
,
1928 * if the next entry isn't a bitmap we need to return to let the
1929 * extent stuff do its work.
1931 if (!bitmap_info
->bitmap
)
1935 * Ok the next item is a bitmap, but it may not actually hold
1936 * the information for the rest of this free space stuff, so
1937 * look for it, and if we don't find it return so we can try
1938 * everything over again.
1940 search_start
= *offset
;
1941 search_bytes
= ctl
->unit
;
1942 ret
= search_bitmap(ctl
, bitmap_info
, &search_start
,
1943 &search_bytes
, false);
1944 if (ret
< 0 || search_start
!= *offset
)
1948 } else if (!bitmap_info
->bytes
)
1949 free_bitmap(ctl
, bitmap_info
);
1954 static u64
add_bytes_to_bitmap(struct btrfs_free_space_ctl
*ctl
,
1955 struct btrfs_free_space
*info
, u64 offset
,
1958 u64 bytes_to_set
= 0;
1961 end
= info
->offset
+ (u64
)(BITS_PER_BITMAP
* ctl
->unit
);
1963 bytes_to_set
= min(end
- offset
, bytes
);
1965 bitmap_set_bits(ctl
, info
, offset
, bytes_to_set
);
1968 * We set some bytes, we have no idea what the max extent size is
1971 info
->max_extent_size
= 0;
1973 return bytes_to_set
;
1977 static bool use_bitmap(struct btrfs_free_space_ctl
*ctl
,
1978 struct btrfs_free_space
*info
)
1980 struct btrfs_block_group_cache
*block_group
= ctl
->private;
1981 struct btrfs_fs_info
*fs_info
= block_group
->fs_info
;
1982 bool forced
= false;
1984 #ifdef CONFIG_BTRFS_DEBUG
1985 if (btrfs_should_fragment_free_space(block_group
))
1990 * If we are below the extents threshold then we can add this as an
1991 * extent, and don't have to deal with the bitmap
1993 if (!forced
&& ctl
->free_extents
< ctl
->extents_thresh
) {
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents.  However, if we have
		 * plenty of cache left then go ahead and add them, no sense in
		 * adding the overhead of a bitmap if we don't have to.
2001 if (info
->bytes
<= fs_info
->sectorsize
* 4) {
2002 if (ctl
->free_extents
* 2 <= ctl
->extents_thresh
)
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries. However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory
	 * limit, so still allow those block groups to have a bitmap entry.
2017 if (((BITS_PER_BITMAP
* ctl
->unit
) >> 1) > block_group
->key
.offset
)
2023 static const struct btrfs_free_space_op free_space_op
= {
2024 .recalc_thresholds
= recalculate_thresholds
,
2025 .use_bitmap
= use_bitmap
,
2028 static int insert_into_bitmap(struct btrfs_free_space_ctl
*ctl
,
2029 struct btrfs_free_space
*info
)
2031 struct btrfs_free_space
*bitmap_info
;
2032 struct btrfs_block_group_cache
*block_group
= NULL
;
2034 u64 bytes
, offset
, bytes_added
;
2037 bytes
= info
->bytes
;
2038 offset
= info
->offset
;
2040 if (!ctl
->op
->use_bitmap(ctl
, info
))
2043 if (ctl
->op
== &free_space_op
)
2044 block_group
= ctl
->private;
2047 * Since we link bitmaps right into the cluster we need to see if we
2048 * have a cluster here, and if so and it has our bitmap we need to add
2049 * the free space to that bitmap.
2051 if (block_group
&& !list_empty(&block_group
->cluster_list
)) {
2052 struct btrfs_free_cluster
*cluster
;
2053 struct rb_node
*node
;
2054 struct btrfs_free_space
*entry
;
2056 cluster
= list_entry(block_group
->cluster_list
.next
,
2057 struct btrfs_free_cluster
,
2059 spin_lock(&cluster
->lock
);
2060 node
= rb_first(&cluster
->root
);
2062 spin_unlock(&cluster
->lock
);
2063 goto no_cluster_bitmap
;
2066 entry
= rb_entry(node
, struct btrfs_free_space
, offset_index
);
2067 if (!entry
->bitmap
) {
2068 spin_unlock(&cluster
->lock
);
2069 goto no_cluster_bitmap
;
2072 if (entry
->offset
== offset_to_bitmap(ctl
, offset
)) {
2073 bytes_added
= add_bytes_to_bitmap(ctl
, entry
,
2075 bytes
-= bytes_added
;
2076 offset
+= bytes_added
;
2078 spin_unlock(&cluster
->lock
);
2086 bitmap_info
= tree_search_offset(ctl
, offset_to_bitmap(ctl
, offset
),
2093 bytes_added
= add_bytes_to_bitmap(ctl
, bitmap_info
, offset
, bytes
);
2094 bytes
-= bytes_added
;
2095 offset
+= bytes_added
;
2105 if (info
&& info
->bitmap
) {
2106 add_new_bitmap(ctl
, info
, offset
);
2111 spin_unlock(&ctl
->tree_lock
);
2113 /* no pre-allocated info, allocate a new one */
2115 info
= kmem_cache_zalloc(btrfs_free_space_cachep
,
2118 spin_lock(&ctl
->tree_lock
);
2124 /* allocate the bitmap */
2125 info
->bitmap
= kzalloc(PAGE_SIZE
, GFP_NOFS
);
2126 spin_lock(&ctl
->tree_lock
);
2127 if (!info
->bitmap
) {
2137 kfree(info
->bitmap
);
2138 kmem_cache_free(btrfs_free_space_cachep
, info
);
2144 static bool try_merge_free_space(struct btrfs_free_space_ctl
*ctl
,
2145 struct btrfs_free_space
*info
, bool update_stat
)
2147 struct btrfs_free_space
*left_info
;
2148 struct btrfs_free_space
*right_info
;
2149 bool merged
= false;
2150 u64 offset
= info
->offset
;
2151 u64 bytes
= info
->bytes
;
2154 * first we want to see if there is free space adjacent to the range we
2155 * are adding, if there is remove that struct and add a new one to
2156 * cover the entire range
2158 right_info
= tree_search_offset(ctl
, offset
+ bytes
, 0, 0);
2159 if (right_info
&& rb_prev(&right_info
->offset_index
))
2160 left_info
= rb_entry(rb_prev(&right_info
->offset_index
),
2161 struct btrfs_free_space
, offset_index
);
2163 left_info
= tree_search_offset(ctl
, offset
- 1, 0, 0);
2165 if (right_info
&& !right_info
->bitmap
) {
2167 unlink_free_space(ctl
, right_info
);
2169 __unlink_free_space(ctl
, right_info
);
2170 info
->bytes
+= right_info
->bytes
;
2171 kmem_cache_free(btrfs_free_space_cachep
, right_info
);
2175 if (left_info
&& !left_info
->bitmap
&&
2176 left_info
->offset
+ left_info
->bytes
== offset
) {
2178 unlink_free_space(ctl
, left_info
);
2180 __unlink_free_space(ctl
, left_info
);
2181 info
->offset
= left_info
->offset
;
2182 info
->bytes
+= left_info
->bytes
;
2183 kmem_cache_free(btrfs_free_space_cachep
, left_info
);
2190 static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl
*ctl
,
2191 struct btrfs_free_space
*info
,
2194 struct btrfs_free_space
*bitmap
;
2197 const u64 end
= info
->offset
+ info
->bytes
;
2198 const u64 bitmap_offset
= offset_to_bitmap(ctl
, end
);
2201 bitmap
= tree_search_offset(ctl
, bitmap_offset
, 1, 0);
2205 i
= offset_to_bit(bitmap
->offset
, ctl
->unit
, end
);
2206 j
= find_next_zero_bit(bitmap
->bitmap
, BITS_PER_BITMAP
, i
);
2209 bytes
= (j
- i
) * ctl
->unit
;
2210 info
->bytes
+= bytes
;
2213 bitmap_clear_bits(ctl
, bitmap
, end
, bytes
);
2215 __bitmap_clear_bits(ctl
, bitmap
, end
, bytes
);
2218 free_bitmap(ctl
, bitmap
);
2223 static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl
*ctl
,
2224 struct btrfs_free_space
*info
,
2227 struct btrfs_free_space
*bitmap
;
2231 unsigned long prev_j
;
2234 bitmap_offset
= offset_to_bitmap(ctl
, info
->offset
);
2235 /* If we're on a boundary, try the previous logical bitmap. */
2236 if (bitmap_offset
== info
->offset
) {
2237 if (info
->offset
== 0)
2239 bitmap_offset
= offset_to_bitmap(ctl
, info
->offset
- 1);
2242 bitmap
= tree_search_offset(ctl
, bitmap_offset
, 1, 0);
2246 i
= offset_to_bit(bitmap
->offset
, ctl
->unit
, info
->offset
) - 1;
2248 prev_j
= (unsigned long)-1;
2249 for_each_clear_bit_from(j
, bitmap
->bitmap
, BITS_PER_BITMAP
) {
2257 if (prev_j
== (unsigned long)-1)
2258 bytes
= (i
+ 1) * ctl
->unit
;
2260 bytes
= (i
- prev_j
) * ctl
->unit
;
2262 info
->offset
-= bytes
;
2263 info
->bytes
+= bytes
;
2266 bitmap_clear_bits(ctl
, bitmap
, info
->offset
, bytes
);
2268 __bitmap_clear_bits(ctl
, bitmap
, info
->offset
, bytes
);
2271 free_bitmap(ctl
, bitmap
);
2277 * We prefer always to allocate from extent entries, both for clustered and
2278 * non-clustered allocation requests. So when attempting to add a new extent
2279 * entry, try to see if there's adjacent free space in bitmap entries, and if
2280 * there is, migrate that space from the bitmaps to the extent.
2281 * Like this we get better chances of satisfying space allocation requests
2282 * because we attempt to satisfy them based on a single cache entry, and never
2283 * on 2 or more entries - even if the entries represent a contiguous free space
2284 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2287 static void steal_from_bitmap(struct btrfs_free_space_ctl
*ctl
,
2288 struct btrfs_free_space
*info
,
2292 * Only work with disconnected entries, as we can change their offset,
2293 * and must be extent entries.
2295 ASSERT(!info
->bitmap
);
2296 ASSERT(RB_EMPTY_NODE(&info
->offset_index
));
2298 if (ctl
->total_bitmaps
> 0) {
2300 bool stole_front
= false;
2302 stole_end
= steal_from_bitmap_to_end(ctl
, info
, update_stat
);
2303 if (ctl
->total_bitmaps
> 0)
2304 stole_front
= steal_from_bitmap_to_front(ctl
, info
,
2307 if (stole_end
|| stole_front
)
2308 try_merge_free_space(ctl
, info
, update_stat
);
2312 int __btrfs_add_free_space(struct btrfs_fs_info
*fs_info
,
2313 struct btrfs_free_space_ctl
*ctl
,
2314 u64 offset
, u64 bytes
)
2316 struct btrfs_free_space
*info
;
2319 info
= kmem_cache_zalloc(btrfs_free_space_cachep
, GFP_NOFS
);
2323 info
->offset
= offset
;
2324 info
->bytes
= bytes
;
2325 RB_CLEAR_NODE(&info
->offset_index
);
2327 spin_lock(&ctl
->tree_lock
);
2329 if (try_merge_free_space(ctl
, info
, true))
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent
	 * entry.  Before we do that, see if we need to drop this into a
	 * bitmap instead.
2337 ret
= insert_into_bitmap(ctl
, info
);
2346 * Only steal free space from adjacent bitmaps if we're sure we're not
2347 * going to add the new free space to existing bitmap entries - because
2348 * that would mean unnecessary work that would be reverted. Therefore
2349 * attempt to steal space from bitmaps if we're adding an extent entry.
2351 steal_from_bitmap(ctl
, info
, true);
2353 ret
= link_free_space(ctl
, info
);
2355 kmem_cache_free(btrfs_free_space_cachep
, info
);
2357 spin_unlock(&ctl
->tree_lock
);
2360 btrfs_crit(fs_info
, "unable to add free space :%d", ret
);
2361 ASSERT(ret
!= -EEXIST
);
2367 int btrfs_remove_free_space(struct btrfs_block_group_cache
*block_group
,
2368 u64 offset
, u64 bytes
)
2370 struct btrfs_free_space_ctl
*ctl
= block_group
->free_space_ctl
;
2371 struct btrfs_free_space
*info
;
2373 bool re_search
= false;
2375 spin_lock(&ctl
->tree_lock
);
2382 info
= tree_search_offset(ctl
, offset
, 0, 0);
2385 * oops didn't find an extent that matched the space we wanted
2386 * to remove, look for a bitmap instead
2388 info
= tree_search_offset(ctl
, offset_to_bitmap(ctl
, offset
),
2392 * If we found a partial bit of our free space in a
2393 * bitmap but then couldn't find the other part this may
2394 * be a problem, so WARN about it.
2402 if (!info
->bitmap
) {
2403 unlink_free_space(ctl
, info
);
2404 if (offset
== info
->offset
) {
2405 u64 to_free
= min(bytes
, info
->bytes
);
2407 info
->bytes
-= to_free
;
2408 info
->offset
+= to_free
;
2410 ret
= link_free_space(ctl
, info
);
2413 kmem_cache_free(btrfs_free_space_cachep
, info
);
2420 u64 old_end
= info
->bytes
+ info
->offset
;
2422 info
->bytes
= offset
- info
->offset
;
2423 ret
= link_free_space(ctl
, info
);
2428 /* Not enough bytes in this entry to satisfy us */
2429 if (old_end
< offset
+ bytes
) {
2430 bytes
-= old_end
- offset
;
2433 } else if (old_end
== offset
+ bytes
) {
2437 spin_unlock(&ctl
->tree_lock
);
2439 ret
= btrfs_add_free_space(block_group
, offset
+ bytes
,
2440 old_end
- (offset
+ bytes
));
2446 ret
= remove_from_bitmap(ctl
, info
, &offset
, &bytes
);
2447 if (ret
== -EAGAIN
) {
2452 spin_unlock(&ctl
->tree_lock
);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}
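
/* Initialize the per-block-group free space control structure. */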
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
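
/* Drop every entry from the cache; the caller must hold ctl->tree_lock. */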
static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
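
/*
 * Find and remove 'bytes' (plus empty_size worth of slack) of free space
 * from the cache; returns the logical start of the allocation, or 0 if
 * nothing large enough was found.
 */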
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len);
	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
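
/*
 * Scan a bitmap entry for a run of free bits big enough to seed the
 * cluster; on success the entry is moved from the free space tree into
 * the cluster's rbtree.
 */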
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster. For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents. For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
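
/*
 * Discard one contiguous range and then hand the reserved space back to
 * the free space cache once the discard has finished.
 */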
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
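
/* Trim the extent (non-bitmap) entries that fall inside [start, end). */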
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
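
/* Trim the ranges recorded in bitmap entries inside [start, end). */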
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
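
/*
 * Pin the block group against chunk removal while a trim is running; the
 * matching put below performs the deferred cleanup once the last trimmer
 * and the block group removal have both finished.
 */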
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->trimming);
}
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		/*
		 * remove_extent_mapping() will delete us from the pinned_chunks
		 * list, which is protected by the chunk mutex.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
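
/* Grab (or look up and cache) the inode backing the free ino cache. */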
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							inode->i_size);
		btrfs_err(fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
	}

	return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kfree(map);
	return 0;
}
/*
 * Checks to see if the given range is in the free space cache. This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */