/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "free-space-cache.h"
#include "transaction.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}
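
/*
 * Look up the free space cache inode for a block group.  Reuse the inode
 * reference already cached in the block group when there is one, otherwise
 * look it up on disk, convert old style space inodes (missing NODATASUM/
 * NODATACOW) and stash a reference in the block group.
 */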
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
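
/*
 * Create the on-disk items that back a free space cache: an inode item for
 * the cache file (NOCOMPRESS/PREALLOC, plus NODATASUM/NODATACOW for block
 * group caches since their crcs are inlined) and a btrfs_free_space_header
 * item keyed by the block group offset.
 */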
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();
	bool locked = false;

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(root, trans, block_group,
					    &block_group->io_ctl, path,
					    block_group->key.objectid);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * setup or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
	return 0;
}
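
/*
 * btrfs_io_ctl tracks the cache file's pages while we serialize or
 * deserialize it: the array of pages, the currently mapped page and write
 * cursor, and whether per-page crcs are stored inline in the first page.
 */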
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	io_ctl->page = NULL;
	io_ctl->cur = NULL;
	io_ctl->orig = NULL;
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}
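
/*
 * On-disk layout of the first cache page: when crcs are enabled the page
 * starts with one u32 crc per page followed by a u64 generation, otherwise
 * just the u64 generation.  The two helpers below write and verify that
 * generation while positioning io_ctl->cur past the header.
 */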
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
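
/*
 * Free space entries are packed as btrfs_free_space_entry records (offset,
 * bytes, type); bitmap payloads are written later as whole pages.  When the
 * current page fills up its crc is computed and the next page is mapped.
 */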
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
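
/*
 * Read a free space cache file back into the in-memory rbtree: validate the
 * header generation against the inode, check the crcs, link every extent
 * entry, read the bitmap pages, and finally merge contiguous entries.
 */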
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "The free space cache file (%llu) is invalid. skip it\n",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) "
			"did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
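
/*
 * Write out every extent and bitmap entry in the ctl rbtree (plus any entry
 * that currently lives in the block group's cluster), remembering bitmap
 * entries on bitmap_list so their payload pages can be written later.
 * Ranges that are being trimmed are written out as well so they are not lost
 * across a commit.
 */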
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}
static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct list_head *pos, *n;
	int ret;

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}
static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}
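
/*
 * Second half of the cache writeout: wait for the dirty cache pages to hit
 * disk, update the free space header item so the cache is considered valid,
 * and update the block group's disk_cache_state accordingly.
 */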
int btrfs_wait_cache_io(struct btrfs_root *root,
			struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_io_ctl *io_ctl,
			struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	if (block_group)
		root = root->fs_info->tree_root;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
			btrfs_err(root->fs_info,
				"failed to write free space cache for block group %llu",
				block_group->key.objectid);
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, root, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * at this point the pages are under IO and we're happy,
	 * the caller is responsible for waiting on them and updating
	 * the cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u32 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
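
/*
 * Insert a free space entry into the offset-sorted rbtree.  An extent entry
 * and a bitmap entry may share an offset; the extent entry is kept logically
 * first, see the comment in the collision case below.
 */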
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
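
/*
 * Recompute ctl->extents_thresh so bitmap pages plus extent entries stay
 * within MAX_CACHE_BYTES_PER_GIG per GiB of block group.  Worked example
 * (assuming 4K pages): a 1GiB block group gets max_bytes = 32K; with one
 * bitmap in use, bitmap_bytes = (1 + 1) * 4K = 8K, leaving
 * min(32K - 8K, 32K >> 1) = 16K of memory for extent entries.
 */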
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u32, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}
static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
{
	if (entry->bitmap)
		return entry->max_extent_size;
	return entry->bytes;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else {
				*max_extent_size =
					max(get_max_extent_size(entry),
					    *max_extent_size);
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
					     block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still be allowed to have a bitmap
	 * entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
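
/*
 * Add the range described by @info to one or more bitmap entries, creating a
 * new bitmap (reusing @info for it) when no existing bitmap covers the
 * offset.  If the block group has an active cluster whose first entry is a
 * bitmap, that bitmap is tried first.
 */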
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	}
	goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info = NULL;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else if (!right_info)
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
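
/*
 * The two helpers below migrate free space that sits in a bitmap directly
 * after (or before) an extent entry into that extent entry, so allocation
 * requests can be satisfied from a single entry; see steal_from_bitmap()
 * further down for how they are used together.
 */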
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests.  So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
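
/*
 * Remove [offset, offset + bytes) from the free space tree.  The range may
 * span extent entries and bitmaps, so this loops: extent entries are trimmed
 * or split, and remove_from_bitmap() is retried when it returns -EAGAIN
 * because the range continues in a following bitmap.
 */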
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        spin_lock(&ctl->tree_lock);
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes && !block_group->ro)
                        count++;
                btrfs_crit(block_group->fs_info,
                           "entry offset %llu, bytes %llu, bitmap %s",
                           info->offset, info->bytes,
                           (info->bitmap) ? "yes" : "no");
        }
        spin_unlock(&ctl->tree_lock);
        btrfs_info(block_group->fs_info, "block group has cluster?: %s",
                   list_empty(&block_group->cluster_list) ? "no" : "yes");
        btrfs_info(block_group->fs_info,
                   "%d blocks of free space at or bigger than bytes is", count);
}
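
/*
 * Example output (editor's illustration, made-up values): each loop iteration
 * above logs a line such as
 *
 *      entry offset 1103101952, bytes 65536, bitmap no
 *
 * followed by the "block group has cluster?" line and the final count summary.
 */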
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

        spin_lock_init(&ctl->tree_lock);
        ctl->unit = block_group->sectorsize;
        ctl->start = block_group->key.objectid;
        ctl->private = block_group;
        ctl->op = &free_space_op;
        INIT_LIST_HEAD(&ctl->trimming_ranges);
        mutex_init(&ctl->cache_writeout_mutex);

        /*
         * we only want to have 32k of ram per block group for keeping
         * track of free space, and if we pass 1/2 of that we want to
         * start converting things over to using bitmaps
         */
        ctl->extents_thresh = ((1024 * 32) / 2) /
                              sizeof(struct btrfs_free_space);
}
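
/*
 * Editor's note on the arithmetic above: half of the 32k budget is
 * (1024 * 32) / 2 = 16384 bytes, so extents_thresh works out to
 * 16384 / sizeof(struct btrfs_free_space) extent entries per block group.
 * For instance, if the entry were 64 bytes (a hypothetical size, purely for
 * illustration), the threshold would be 256 entries before new free space
 * starts being tracked in bitmaps.
 */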
/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->block_group = NULL;
        cluster->window_start = 0;
        list_del_init(&cluster->block_group_list);

        node = rb_first(&cluster->root);
        while (node) {
                bool bitmap;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                RB_CLEAR_NODE(&entry->offset_index);

                bitmap = (entry->bitmap != NULL);
                if (!bitmap) {
                        try_merge_free_space(ctl, entry, false);
                        steal_from_bitmap(ctl, entry, false);
                }
                tree_insert_offset(&ctl->free_space_offset,
                                   entry->offset, &entry->offset_index, bitmap);
        }
        cluster->root = RB_ROOT;

out:
        spin_unlock(&cluster->lock);
        btrfs_put_block_group(block_group);
        return 0;
}
static void __btrfs_remove_free_space_cache_locked(
                                struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, offset_index);
                if (!info->bitmap) {
                        unlink_free_space(ctl, info);
                        kmem_cache_free(btrfs_free_space_cachep, info);
                } else {
                        free_bitmap(ctl, info);
                }

                cond_resched_lock(&ctl->tree_lock);
        }
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
        spin_lock(&ctl->tree_lock);
        __btrfs_remove_free_space_cache_locked(ctl);
        spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_cluster *cluster;
        struct list_head *head;

        spin_lock(&ctl->tree_lock);
        while ((head = block_group->cluster_list.next) !=
               &block_group->cluster_list) {
                cluster = list_entry(head, struct btrfs_free_cluster,
                                     block_group_list);

                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);

                cond_resched_lock(&ctl->tree_lock);
        }
        __btrfs_remove_free_space_cache_locked(ctl);
        spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size,
                               u64 *max_extent_size)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
        u64 align_gap = 0;
        u64 align_gap_len = 0;

        spin_lock(&ctl->tree_lock);
        entry = find_free_space(ctl, &offset, &bytes_search,
                                block_group->full_stripe_len, max_extent_size);
        if (!entry)
                goto out;

        ret = offset;
        if (entry->bitmap) {
                bitmap_clear_bits(ctl, entry, offset, bytes);
                if (!entry->bytes)
                        free_bitmap(ctl, entry);
        } else {
                unlink_free_space(ctl, entry);
                align_gap_len = offset - entry->offset;
                align_gap = entry->offset;

                entry->offset = offset + bytes;
                WARN_ON(entry->bytes < bytes + align_gap_len);

                entry->bytes -= bytes + align_gap_len;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
                        link_free_space(ctl, entry);
        }
out:
        spin_unlock(&ctl->tree_lock);

        if (align_gap_len)
                __btrfs_add_free_space(ctl, align_gap, align_gap_len);
        return ret;
}
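
/*
 * Worked example (editor's illustration, hypothetical numbers): if the chosen
 * entry starts at 1004K but alignment moved the returned offset up to 1024K
 * for a 256K allocation, then align_gap = 1004K and align_gap_len = 20K; the
 * entry is shrunk to start at 1280K (offset + bytes) and the 20K gap is
 * returned to the cache via __btrfs_add_free_space() once the tree lock has
 * been dropped.
 */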
/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space_ctl *ctl;
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it, don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        ctl = block_group->free_space_ctl;

        /* now return any extents the cluster had on it */
        spin_lock(&ctl->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&ctl->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   struct btrfs_free_space *entry,
                                   u64 bytes, u64 min_start,
                                   u64 *max_extent_size)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
        u64 search_start = cluster->window_start;
        u64 search_bytes = bytes;
        u64 ret = 0;

        search_start = min_start;
        search_bytes = bytes;

        err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
        if (err) {
                *max_extent_size = max(get_max_extent_size(entry),
                                       *max_extent_size);
                return 0;
        }

        ret = search_start;
        __bitmap_clear_bits(ctl, entry, ret, bytes);

        return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start, u64 *max_extent_size)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while (1) {
                if (entry->bytes < bytes)
                        *max_extent_size = max(get_max_extent_size(entry),
                                               *max_extent_size);

                if (entry->bytes < bytes ||
                    (!entry->bitmap && entry->offset < min_start)) {
                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }

                if (entry->bitmap) {
                        ret = btrfs_alloc_from_bitmap(block_group,
                                                      cluster, entry, bytes,
                                                      cluster->window_start,
                                                      max_extent_size);
                        if (ret == 0) {
                                node = rb_next(&entry->offset_index);
                                if (!node)
                                        break;
                                entry = rb_entry(node, struct btrfs_free_space,
                                                 offset_index);
                                continue;
                        }
                        cluster->window_start += bytes;
                } else {
                        ret = entry->offset;

                        entry->offset += bytes;
                        entry->bytes -= bytes;
                }

                if (entry->bytes == 0)
                        rb_erase(&entry->offset_index, &cluster->root);
                break;
        }
out:
        spin_unlock(&cluster->lock);

        if (!ret)
                return 0;

        spin_lock(&ctl->tree_lock);

        ctl->free_space -= bytes;
        if (entry->bytes == 0) {
                ctl->free_extents--;
                if (entry->bitmap) {
                        kfree(entry->bitmap);
                        ctl->total_bitmaps--;
                        ctl->op->recalc_thresholds(ctl);
                }
                kmem_cache_free(btrfs_free_space_cachep, entry);
        }

        spin_unlock(&ctl->tree_lock);

        return ret;
}
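
/*
 * Usage sketch (editor's note, not part of the original file): the extent
 * allocator typically sets a cluster up and then carves allocations out of it
 * while holding cluster->refill_lock, roughly:
 *
 *      spin_lock(&cluster->refill_lock);
 *      ret = btrfs_find_space_cluster(root, block_group, cluster,
 *                                     search_start, num_bytes, empty_size);
 *      if (!ret)
 *              offset = btrfs_alloc_from_cluster(block_group, cluster,
 *                                                num_bytes, search_start,
 *                                                &max_extent_size);
 *      spin_unlock(&cluster->refill_lock);
 *
 * The local names above are illustrative; a returned offset of 0 means
 * nothing suitably large was found in the cluster.
 */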
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_space *entry,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes,
                                u64 cont1_bytes, u64 min_bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
        unsigned long want_bits;
        unsigned long min_bits;
        unsigned long found_bits;
        unsigned long max_bits = 0;
        unsigned long start = 0;
        unsigned long total_found = 0;
        int ret;

        i = offset_to_bit(entry->offset, ctl->unit,
                          max_t(u64, offset, entry->offset));
        want_bits = bytes_to_bits(bytes, ctl->unit);
        min_bits = bytes_to_bits(min_bytes, ctl->unit);

        /*
         * Don't bother looking for a cluster in this bitmap if it's heavily
         * fragmented.
         */
        if (entry->max_extent_size &&
            entry->max_extent_size < cont1_bytes)
                return -ENOSPC;
again:
        found_bits = 0;
        for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(entry->bitmap,
                                               BITS_PER_BITMAP, i);
                if (next_zero - i >= min_bits) {
                        found_bits = next_zero - i;
                        if (found_bits > max_bits)
                                max_bits = found_bits;
                        break;
                }
                if (next_zero - i > max_bits)
                        max_bits = next_zero - i;
                i = next_zero;
        }

        if (!found_bits) {
                entry->max_extent_size = (u64)max_bits * ctl->unit;
                return -ENOSPC;
        }

        if (!total_found) {
                start = i;
                cluster->max_size = 0;
        }

        total_found += found_bits;

        if (cluster->max_size < found_bits * ctl->unit)
                cluster->max_size = found_bits * ctl->unit;

        if (total_found < want_bits || cluster->max_size < cont1_bytes) {
                i = next_zero + 1;
                goto again;
        }

        cluster->window_start = start * ctl->unit + entry->offset;
        rb_erase(&entry->offset_index, &ctl->free_space_offset);
        ret = tree_insert_offset(&cluster->root, entry->offset,
                                 &entry->offset_index, 1);
        ASSERT(!ret); /* -EEXIST; Logic error */

        trace_btrfs_setup_cluster(block_group, cluster,
                                  total_found * ctl->unit, 1);
        return 0;
}
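
/*
 * Editor's note on the unit conversions above (illustrative numbers): with a
 * 4K ctl->unit, bytes_to_bits(1M, ctl->unit) asks for want_bits = 256, and a
 * run of found_bits set bits represents found_bits * 4K bytes of space.  One
 * bitmap therefore describes BITS_PER_BITMAP * ctl->unit bytes, i.e. 128M
 * when both the page size and the unit are 4K (an assumption, not a
 * requirement).
 */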
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        struct btrfs_free_cluster *cluster,
                        struct list_head *bitmaps, u64 offset, u64 bytes,
                        u64 cont1_bytes, u64 min_bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
        struct btrfs_free_space *entry = NULL;
        struct btrfs_free_space *last;
        struct rb_node *node;
        u64 window_free;
        u64 max_extent;
        u64 total_size = 0;

        entry = tree_search_offset(ctl, offset, 0, 1);
        if (!entry)
                return -ENOSPC;

        /*
         * We don't want bitmaps, so just move along until we find a normal
         * extent entry.
         */
        while (entry->bitmap || entry->bytes < min_bytes) {
                if (entry->bitmap && list_empty(&entry->list))
                        list_add_tail(&entry->list, bitmaps);
                node = rb_next(&entry->offset_index);
                if (!node)
                        return -ENOSPC;
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }

        window_free = entry->bytes;
        max_extent = entry->bytes;
        first = entry;
        last = entry;

        for (node = rb_next(&entry->offset_index); node;
             node = rb_next(&entry->offset_index)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);

                if (entry->bitmap) {
                        if (list_empty(&entry->list))
                                list_add_tail(&entry->list, bitmaps);
                        continue;
                }

                if (entry->bytes < min_bytes)
                        continue;

                last = entry;
                window_free += entry->bytes;
                if (entry->bytes > max_extent)
                        max_extent = entry->bytes;
        }

        if (window_free < bytes || max_extent < cont1_bytes)
                return -ENOSPC;

        cluster->window_start = first->offset;

        node = &first->offset_index;

        /*
         * now we've found our entries, pull them out of the free space
         * cache and put them into the cluster rbtree
         */
        do {
                int ret;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                if (entry->bitmap || entry->bytes < min_bytes)
                        continue;

                rb_erase(&entry->offset_index, &ctl->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index, 0);
                total_size += entry->bytes;
                ASSERT(!ret); /* -EEXIST; Logic error */
        } while (node && entry != last);

        cluster->max_size = max_extent;
        trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
        return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                     struct btrfs_free_cluster *cluster,
                     struct list_head *bitmaps, u64 offset, u64 bytes,
                     u64 cont1_bytes, u64 min_bytes)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        int ret = -ENOSPC;
        u64 bitmap_offset = offset_to_bitmap(ctl, offset);

        if (ctl->total_bitmaps == 0)
                return -ENOSPC;

        /*
         * The bitmap that covers offset won't be in the list unless offset
         * is just its start offset.
         */
        if (!list_empty(bitmaps))
                entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

        if (!entry || entry->offset != bitmap_offset) {
                entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
                if (entry && list_empty(&entry->list))
                        list_add(&entry->list, bitmaps);
        }

        list_for_each_entry(entry, bitmaps, list) {
                if (entry->bytes < bytes)
                        continue;
                ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
                                           bytes, cont1_bytes, min_bytes);
                if (!ret)
                        return 0;
        }

        /*
         * The bitmaps list has all the bitmaps that record free space
         * starting after offset, so no more search is required.
         */
        return -ENOSPC;
}
/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry, *tmp;
        LIST_HEAD(bitmaps);
        u64 min_bytes;
        u64 cont1_bytes;
        int ret;

        /*
         * Choose the minimum extent size we'll require for this
         * cluster. For SSD_SPREAD, don't allow any fragmentation.
         * For metadata, allow allocations with smaller extents. For
         * data, keep it dense.
         */
        if (btrfs_test_opt(root, SSD_SPREAD)) {
                cont1_bytes = min_bytes = bytes + empty_size;
        } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
                cont1_bytes = bytes;
                min_bytes = block_group->sectorsize;
        } else {
                cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
                min_bytes = block_group->sectorsize;
        }

        spin_lock(&ctl->tree_lock);

        /*
         * If we know we don't have enough space to make a cluster don't even
         * bother doing all the work to try and find one.
         */
        if (ctl->free_space < bytes) {
                spin_unlock(&ctl->tree_lock);
                return -ENOSPC;
        }

        spin_lock(&cluster->lock);

        /* someone already found a cluster, hooray */
        if (cluster->block_group) {
                ret = 0;
                goto out;
        }

        trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
                                 min_bytes);

        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
                                      bytes + empty_size,
                                      cont1_bytes, min_bytes);
        if (ret)
                ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
                                           offset, bytes + empty_size,
                                           cont1_bytes, min_bytes);

        /* Clear our temporary list */
        list_for_each_entry_safe(entry, tmp, &bitmaps, list)
                list_del_init(&entry->list);

        if (!ret) {
                atomic_inc(&block_group->count);
                list_add_tail(&cluster->block_group_list,
                              &block_group->cluster_list);
                cluster->block_group = block_group;
        } else {
                trace_btrfs_failed_cluster_setup(block_group);
        }
out:
        spin_unlock(&cluster->lock);
        spin_unlock(&ctl->tree_lock);

        return ret;
}
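
/*
 * Worked example of the sizing policy above (editor's illustration): for a
 * data block group with bytes = 256K and empty_size = 768K, cont1_bytes =
 * max(256K, (256K + 768K) >> 2) = 256K and min_bytes is one sectorsize, so
 * the cluster needs at least one 256K contiguous run but may fill the rest of
 * the 1M target from smaller pieces.  With SSD_SPREAD the whole 1M would have
 * to be contiguous, since cont1_bytes = min_bytes = bytes + empty_size.
 */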
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
        spin_lock_init(&cluster->lock);
        spin_lock_init(&cluster->refill_lock);
        cluster->root = RB_ROOT;
        cluster->max_size = 0;
        cluster->fragmented = false;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
}
static int do_trimming(struct btrfs_block_group_cache *block_group,
                       u64 *total_trimmed, u64 start, u64 bytes,
                       u64 reserved_start, u64 reserved_bytes,
                       struct btrfs_trim_range *trim_entry)
{
        struct btrfs_space_info *space_info = block_group->space_info;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int ret;
        int update = 0;
        u64 trimmed = 0;

        spin_lock(&space_info->lock);
        spin_lock(&block_group->lock);
        if (!block_group->ro) {
                block_group->reserved += reserved_bytes;
                space_info->bytes_reserved += reserved_bytes;
                update = 1;
        }
        spin_unlock(&block_group->lock);
        spin_unlock(&space_info->lock);

        ret = btrfs_discard_extent(fs_info->extent_root,
                                   start, bytes, &trimmed);
        if (!ret)
                *total_trimmed += trimmed;

        mutex_lock(&ctl->cache_writeout_mutex);
        btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
        list_del(&trim_entry->list);
        mutex_unlock(&ctl->cache_writeout_mutex);

        if (update) {
                spin_lock(&space_info->lock);
                spin_lock(&block_group->lock);
                if (block_group->ro)
                        space_info->bytes_readonly += reserved_bytes;
                block_group->reserved -= reserved_bytes;
                space_info->bytes_reserved -= reserved_bytes;
                spin_unlock(&space_info->lock);
                spin_unlock(&block_group->lock);
        }

        return ret;
}
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
                          u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;
        int ret = 0;
        u64 extent_start;
        u64 extent_bytes;
        u64 bytes;

        while (start < end) {
                struct btrfs_trim_range trim_entry;

                mutex_lock(&ctl->cache_writeout_mutex);
                spin_lock(&ctl->tree_lock);

                if (ctl->free_space < minlen) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        break;
                }

                entry = tree_search_offset(ctl, start, 0, 1);
                if (!entry) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        break;
                }

                /* skip bitmaps */
                while (entry->bitmap) {
                        node = rb_next(&entry->offset_index);
                        if (!node) {
                                spin_unlock(&ctl->tree_lock);
                                mutex_unlock(&ctl->cache_writeout_mutex);
                                goto out;
                        }
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                }

                if (entry->offset >= end) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        break;
                }

                extent_start = entry->offset;
                extent_bytes = entry->bytes;
                start = max(start, extent_start);
                bytes = min(extent_start + extent_bytes, end) - start;
                if (bytes < minlen) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        goto next;
                }

                unlink_free_space(ctl, entry);
                kmem_cache_free(btrfs_free_space_cachep, entry);

                spin_unlock(&ctl->tree_lock);
                trim_entry.start = extent_start;
                trim_entry.bytes = extent_bytes;
                list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
                mutex_unlock(&ctl->cache_writeout_mutex);

                ret = do_trimming(block_group, total_trimmed, start, bytes,
                                  extent_start, extent_bytes, &trim_entry);
                if (ret)
                        break;
next:
                start += bytes;

                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                cond_resched();
        }
out:
        return ret;
}
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
                        u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        int ret = 0;
        int ret2;
        u64 bytes;
        u64 offset = offset_to_bitmap(ctl, start);

        while (offset < end) {
                bool next_bitmap = false;
                struct btrfs_trim_range trim_entry;

                mutex_lock(&ctl->cache_writeout_mutex);
                spin_lock(&ctl->tree_lock);

                if (ctl->free_space < minlen) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        break;
                }

                entry = tree_search_offset(ctl, offset, 1, 0);
                if (!entry) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        next_bitmap = true;
                        goto next;
                }

                bytes = minlen;
                ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
                if (ret2 || start >= end) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        next_bitmap = true;
                        goto next;
                }

                bytes = min(bytes, end - start);
                if (bytes < minlen) {
                        spin_unlock(&ctl->tree_lock);
                        mutex_unlock(&ctl->cache_writeout_mutex);
                        goto next;
                }

                bitmap_clear_bits(ctl, entry, start, bytes);
                if (entry->bytes == 0)
                        free_bitmap(ctl, entry);

                spin_unlock(&ctl->tree_lock);
                trim_entry.start = start;
                trim_entry.bytes = bytes;
                list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
                mutex_unlock(&ctl->cache_writeout_mutex);

                ret = do_trimming(block_group, total_trimmed, start, bytes,
                                  start, bytes, &trim_entry);
                if (ret)
                        break;
next:
                if (next_bitmap) {
                        offset += BITS_PER_BITMAP * ctl->unit;
                } else {
                        start += bytes;
                        if (start >= offset + BITS_PER_BITMAP * ctl->unit)
                                offset += BITS_PER_BITMAP * ctl->unit;
                }

                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                cond_resched();
        }

        return ret;
}
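
/*
 * Editor's note (illustrative numbers): each iteration above covers one
 * bitmap's worth of space, so offset advances by BITS_PER_BITMAP * ctl->unit
 * per step, e.g. 32768 bits * 4K = 128M assuming 4K pages and a 4K unit,
 * and trimming moves on to the next bitmap once the current one is exhausted
 * or cannot satisfy minlen.
 */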
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->trimming);
}
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        bool cleanup;

        spin_lock(&block_group->lock);
        cleanup = (atomic_dec_and_test(&block_group->trimming) &&
                   block_group->removed);
        spin_unlock(&block_group->lock);

        if (cleanup) {
                lock_chunks(block_group->fs_info->chunk_root);
                em_tree = &block_group->fs_info->mapping_tree.map_tree;
                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, block_group->key.objectid,
                                           1);
                BUG_ON(!em); /* logic error, can't happen */
                /*
                 * remove_extent_mapping() will delete us from the pinned_chunks
                 * list, which is protected by the chunk mutex.
                 */
                remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
                unlock_chunks(block_group->fs_info->chunk_root);

                /* once for us and once for the tree */
                free_extent_map(em);
                free_extent_map(em);

                /*
                 * We've left one free space entry and other tasks trimming
                 * this block group have each left one entry. Free them.
                 */
                __btrfs_remove_free_space_cache(block_group->free_space_ctl);
        }
}
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                           u64 *trimmed, u64 start, u64 end, u64 minlen)
{
        int ret;

        *trimmed = 0;

        spin_lock(&block_group->lock);
        if (block_group->removed) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        btrfs_get_block_group_trimming(block_group);
        spin_unlock(&block_group->lock);

        ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
        if (ret)
                goto out;

        ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
        btrfs_put_block_group_trimming(block_group);
        return ret;
}
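
/*
 * Usage sketch (editor's note): this is the per-block-group worker behind the
 * FITRIM ioctl; btrfs_trim_fs() in extent-tree.c walks the block groups in
 * the requested range and calls something along the lines of
 *
 *      ret = btrfs_trim_block_group(cache, &group_trimmed, start, end,
 *                                   range->minlen);
 *      trimmed += group_trimmed;
 *
 * accumulating each group's trimmed byte count into the ioctl result.
 */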
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
        struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
        struct btrfs_free_space *entry = NULL;
        u64 ino = 0;

        spin_lock(&ctl->tree_lock);

        if (RB_EMPTY_ROOT(&ctl->free_space_offset))
                goto out;

        entry = rb_entry(rb_first(&ctl->free_space_offset),
                         struct btrfs_free_space, offset_index);

        if (!entry->bitmap) {
                ino = entry->offset;

                unlink_free_space(ctl, entry);
                entry->offset++;
                entry->bytes--;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
                        link_free_space(ctl, entry);
        } else {
                u64 offset = 0;
                u64 count = 1;
                int ret;

                ret = search_bitmap(ctl, entry, &offset, &count, true);
                /* Logic error; Should be empty if it can't find anything */
                ASSERT(!ret);

                ino = offset;
                bitmap_clear_bits(ctl, entry, offset, 1);
                if (entry->bytes == 0)
                        free_bitmap(ctl, entry);
        }
out:
        spin_unlock(&ctl->tree_lock);

        return ino;
}
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
                                    struct btrfs_path *path)
{
        struct inode *inode = NULL;

        spin_lock(&root->ino_cache_lock);
        if (root->ino_cache_inode)
                inode = igrab(root->ino_cache_inode);
        spin_unlock(&root->ino_cache_lock);
        if (inode)
                return inode;

        inode = __lookup_free_space_inode(root, path, 0);
        if (IS_ERR(inode))
                return inode;

        spin_lock(&root->ino_cache_lock);
        if (!btrfs_fs_closing(root->fs_info))
                root->ino_cache_inode = igrab(inode);
        spin_unlock(&root->ino_cache_lock);

        return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_path *path)
{
        return __create_free_space_inode(root, trans, path,
                                         BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_path *path;
        struct inode *inode;
        int ret = 0;
        u64 root_gen = btrfs_root_generation(&root->root_item);

        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return 0;

        /*
         * If we're unmounting then just return, since this does a search on
         * the normal root and not the commit root and we could deadlock.
         */
        if (btrfs_fs_closing(fs_info))
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return 0;

        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode))
                goto out;

        if (root_gen != BTRFS_I(inode)->generation)
                goto out_put;

        ret = __load_free_space_cache(root, inode, ctl, path, 0);

        if (ret < 0)
                btrfs_err(fs_info,
                        "failed to load free ino cache for root %llu",
                        root->root_key.objectid);
out_put:
        iput(inode);
out:
        btrfs_free_path(path);
        return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
                              struct btrfs_trans_handle *trans,
                              struct btrfs_path *path,
                              struct inode *inode)
{
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        int ret;
        struct btrfs_io_ctl io_ctl;
        bool release_metadata = true;

        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return 0;

        memset(&io_ctl, 0, sizeof(io_ctl));
        ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
                                      trans, path, 0);
        if (!ret) {
                /*
                 * At this point writepages() didn't error out, so our metadata
                 * reservation is released when the writeback finishes, at
                 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
                 * with or without an error.
                 */
                release_metadata = false;
                ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
        }

        if (ret) {
                if (release_metadata)
                        btrfs_delalloc_release_metadata(inode, inode->i_size);
                btrfs_err(root->fs_info,
                        "failed to write free ino cache for root %llu",
                        root->root_key.objectid);
        }

        return ret;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
                              u64 offset, u64 bytes, bool bitmap)
{
        struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
        struct btrfs_free_space *info = NULL, *bitmap_info;
        void *map = NULL;
        u64 bytes_added;
        int ret;

again:
        if (!info) {
                info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
                if (!info)
                        return -ENOMEM;
        }

        if (!bitmap) {
                spin_lock(&ctl->tree_lock);
                info->offset = offset;
                info->bytes = bytes;
                info->max_extent_size = 0;
                ret = link_free_space(ctl, info);
                spin_unlock(&ctl->tree_lock);
                if (ret)
                        kmem_cache_free(btrfs_free_space_cachep, info);
                return ret;
        }

        if (!map) {
                map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                if (!map) {
                        kmem_cache_free(btrfs_free_space_cachep, info);
                        return -ENOMEM;
                }
        }

        spin_lock(&ctl->tree_lock);
        bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                         1, 0);
        if (!bitmap_info) {
                info->bitmap = map;
                map = NULL;
                add_new_bitmap(ctl, info, offset);
                bitmap_info = info;
                info = NULL;
        }

        bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

        bytes -= bytes_added;
        offset += bytes_added;
        spin_unlock(&ctl->tree_lock);

        if (bytes)
                goto again;

        if (info)
                kmem_cache_free(btrfs_free_space_cachep, info);
        if (map)
                kfree(map);
        return 0;
}
/*
 * Checks to see if the given range is in the free space cache. This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
                      u64 offset, u64 bytes)
{
        struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&ctl->tree_lock);
        info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info)
                        goto out;
        }

have_info:
        if (info->bitmap) {
                u64 bit_off, bit_bytes;
                struct rb_node *n;
                struct btrfs_free_space *tmp;

                bit_off = offset;
                bit_bytes = ctl->unit;
                ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
                if (!ret) {
                        if (bit_off == offset) {
                                ret = 1;
                                goto out;
                        } else if (bit_off > offset &&
                                   offset + bytes > bit_off) {
                                ret = 1;
                                goto out;
                        }
                }

                n = rb_prev(&info->offset_index);
                while (n) {
                        tmp = rb_entry(n, struct btrfs_free_space,
                                       offset_index);
                        if (tmp->offset + tmp->bytes < offset)
                                break;
                        if (offset + bytes < tmp->offset) {
                                n = rb_prev(&info->offset_index);
                                continue;
                        }
                        info = tmp;
                        goto have_info;
                }

                n = rb_next(&info->offset_index);
                while (n) {
                        tmp = rb_entry(n, struct btrfs_free_space,
                                       offset_index);
                        if (offset + bytes < tmp->offset)
                                break;
                        if (tmp->offset + tmp->bytes < offset) {
                                n = rb_next(&info->offset_index);
                                continue;
                        }
                        info = tmp;
                        goto have_info;
                }

                ret = 0;
                goto out;
        }

        if (info->offset == offset) {
                ret = 1;
                goto out;
        }

        if (offset > info->offset && offset < info->offset + info->bytes)
                ret = 1;
out:
        spin_unlock(&ctl->tree_lock);

        return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */