// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	/* Clamp to the largest u64 if file_offset + num_bytes overflows. */
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	/* Walk forward to the first entry that ends past file_offset. */
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	/* Walk back to the last entry that starts at or before file_offset. */
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	/* Check the cached last-used node before searching the tree. */
	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

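/*
 * Illustrative sketch (hypothetical caller, not part of this file): a COW
 * write path pairs an extent allocation with one of the wrappers above
 * before submitting IO. "ins" follows the usual btrfs_key convention for an
 * allocation result; error handling is elided, and passing 0 as the type
 * (excluded by the IO_DONE/COMPLETE check above) marks a plain regular
 * write:
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, ins.objectid,
 *				       ram_bytes, ins.offset, 0);
 *	if (ret)
 *		goto out_drop_extent_cache;
 */
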
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

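/*
 * Illustrative sketch (hypothetical caller): the checksumming stage
 * typically allocates one btrfs_ordered_sum per bio, fills sum->sums[]
 * and attaches it here so the finish-ordered-io path can insert the csum
 * items. The btrfs_ordered_sum_size() helper from ordered-data.h is
 * assumed:
 *
 *	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio_size), GFP_NOFS);
 *	sums->bytenr = disk_bytenr;
 *	sums->len = bio_size;
 *	// ...compute checksums into sums->sums[]...
 *	btrfs_add_ordered_sum(ordered, sums);
 */
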
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size,
					 int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

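/*
 * A minimal sketch of the walk-forward contract documented above
 * (hypothetical completion handler). The progress guard matters because
 * *file_offset only advances when an ordered extent overlaps the range:
 *
 *	u64 cur = io_start, last;
 *	const u64 io_end = io_start + io_bytes;
 *
 *	while (cur < io_end) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		last = cur;
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&cur, io_end - cur, uptodate))
 *			;// queue "ordered" for finish-ordered-io processing
 *		if (cur == last)
 *			break;	// no ordered extent found in the range
 *	}
 */
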
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

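/*
 * Reference discipline in one illustrative pairing (hypothetical snippet):
 * every successful lookup returns with an extra reference that the caller
 * must drop via btrfs_put_ordered_extent():
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		// inspect ordered->file_offset, ordered->num_bytes, ...
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
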
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

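/*
 * Illustrative caller (hypothetical): fsync-style paths commonly flush and
 * wait on the whole file by passing the maximal range:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *	if (ret)
 *		return ret;
 */
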
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

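/*
 * Illustrative sketch (hypothetical DIO-style caller): probe the whole
 * candidate range and kick any overlapping ordered extent before retrying
 * the lock:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, lockstart,
 *					     lockend - lockstart + 1);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
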
/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

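/*
 * Sketch of the expected consumer-side cleanup (assumption: the log code
 * owns both the log_list linkage and the reference taken above):
 *
 *	list_for_each_entry_safe(ordered, tmp, &ordered_extents, log_list) {
 *		list_del_init(&ordered->log_list);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
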
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

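/*
 * Minimal caller sketch (illustrative): lock the range, operate on it
 * knowing no ordered extent can be pending, then unlock with the cached
 * state handed back:
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
 *	// ...work on the locked range...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached_state);
 */
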
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}