/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}
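
/*
 * Note that entry_end() saturates instead of wrapping.  For example, a
 * (pathological) entry with file_offset == (u64)-8 and len == 16 reports
 * an end of (u64)-1 rather than wrapping around to 7, which keeps the
 * ordering comparisons in tree_insert() and __tree_search() sane.
 */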
/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", offset);
}
/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}
/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}
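
/*
 * Lookup behaviour sketch (hypothetical offsets): with entries covering
 * [0, 4096) and [8192, 12288), tree_search(tree, 100) returns the node
 * for the first entry, while tree_search(tree, 5000) misses and returns
 * the closest node instead.  Callers therefore re-check the result with
 * offset_in_entry() before trusting it; tree->last merely caches the
 * last node returned to short-circuit repeated lookups in the same
 * extent.
 */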
/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
	    !(type == BTRFS_ORDERED_NOCOW))
		entry->csum_bytes_left = disk_len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root,
			      &root->fs_info->ordered_roots);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}
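
/*
 * Usage sketch (hypothetical values, not a real call site): a COW write
 * path that has just reserved 'len' bytes on disk at 'start' pins the
 * range with
 *
 *	ret = btrfs_add_ordered_extent(inode, file_offset, start, len,
 *				       len, 0);
 *
 * (type 0 carries no NOCOW/PREALLOC/COMPRESSED state, so no type bit is
 * set), and the range is later retired from the bio completion path via
 * btrfs_dec_test_ordered_pending().
 */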
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	WARN_ON(entry->csum_bytes_left < sum->len);
	entry->csum_bytes_left -= sum->len;
	if (entry->csum_bytes_left == 0)
		wake_up(&entry->wait);
	spin_unlock_irq(&tree->lock);
}
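
/*
 * Accounting note: csum_bytes_left starts at disk_len (set in
 * __btrfs_add_ordered_extent) and each btrfs_ordered_sum added here
 * subtracts its own length, so a waiter sleeping on entry->wait for
 * checksums is woken exactly when the accumulated sums cover the whole
 * on-disk extent.
 */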
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordering dec_start %llu end %llu", dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
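
/*
 * Walk-forward sketch (hypothetical endio caller): because *file_offset
 * is advanced one byte past the piece just accounted, a bio spanning
 * several ordered extents can retire them one at a time:
 *
 *	u64 offset = start;	// start/len describe the finished bio
 *	struct btrfs_ordered_extent *oe = NULL;
 *
 *	while (offset < start + len) {
 *		if (!btrfs_dec_test_first_ordered_pending(inode, &oe,
 *				&offset, start + len - offset, uptodate))
 *			break;	// nothing (more) fully completed here
 *		handle_completed(oe);	// hypothetical helper; drops the ref
 *		oe = NULL;
 *	}
 */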
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}
/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	int index = log->log_transid % 2;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		spin_lock(&log->log_extents_lock[index]);
		if (list_empty(&ordered->log_list)) {
			list_add_tail(&ordered->log_list, &log->logged_list[index]);
			atomic_inc(&ordered->refs);
		}
		spin_unlock(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&tree->lock);
}
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}
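
/*
 * Reference rule: every lookup helper below that hands back an ordered
 * extent (btrfs_lookup_ordered_extent() and friends) takes a reference
 * for the caller, which must be dropped again, e.g.:
 *
 *	struct btrfs_ordered_extent *oe;
 *
 *	oe = btrfs_lookup_ordered_extent(inode, pos);
 *	if (oe) {
 *		// ... inspect oe->file_offset, oe->len ...
 *		btrfs_put_ordered_extent(oe);
 *	}
 */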
/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		spin_lock(&root->fs_info->ordered_root_lock);
		list_del_init(&BTRFS_I(inode)->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}

	if (!root->nr_ordered_extents) {
		spin_lock(&root->fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}
/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
	struct list_head splice, works;
	struct btrfs_ordered_extent *ordered, *next;
	int count = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);
		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		atomic_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		ordered->flush_work.func = btrfs_run_ordered_extent_work;
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != -1)
			nr--;
		count++;
	}
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return count;
}
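
/*
 * Design note: flushing fans out through the flush_workers pool (one
 * btrfs_work per ordered extent) and is then collected with
 * wait_for_completion(), so a root with many ordered extents flushes
 * concurrently instead of one extent at a time.
 */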
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int done;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != -1) {
			nr -= done;
			WARN_ON(nr < 0);
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
}
/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct list_head splice;
	struct list_head works;
	struct btrfs_delalloc_work *work, *next;
	int ret = 0;

	INIT_LIST_HEAD(&splice);
	INIT_LIST_HEAD(&works);

	mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&cur_trans->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);
		if (!inode)
			continue;

		if (!wait)
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &cur_trans->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		work = btrfs_alloc_delalloc_work(inode, wait, 1);
		if (!work) {
			spin_lock(&root->fs_info->ordered_root_lock);
			if (list_empty(&BTRFS_I(inode)->ordered_operations))
				list_add_tail(&btrfs_inode->ordered_operations,
					      &splice);
			list_splice_tail(&splice,
					 &cur_trans->ordered_operations);
			spin_unlock(&root->fs_info->ordered_root_lock);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_worker(&root->fs_info->flush_workers,
				   &work->work);

		cond_resched();
		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
	mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
	return ret;
}
/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, setup an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback.  So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work.  So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there.  We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness.  Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags)) {
		ret = filemap_fdatawrite_range(inode->i_mapping, start,
					       orig_end);
		if (ret)
			return ret;
	}
	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret;
}
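
/*
 * Usage sketch (hypothetical fsync-like caller) for flushing a whole
 * file, ordered-extent metadata included:
 *
 *	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
 *	if (ret)
 *		return ret;	// e.g. -EIO when BTRFS_ORDERED_IOERR was set
 *
 * This flushes delalloc, waits for writeback, then walks backwards over
 * every overlapping ordered extent and waits for each to complete.
 */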
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
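
/*
 * DIO sketch (hypothetical): before issuing direct IO against
 * [pos, pos + count), check the whole span rather than just pos:
 *
 *	oe = btrfs_lookup_ordered_range(inode, pos, count);
 *	if (oe) {
 *		btrfs_start_ordered_extent(inode, oe, 1);
 *		btrfs_put_ordered_extent(oe);
 *		// ... and retry the lookup until the range is clear
 *	}
 */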
/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}
/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (entry_end(test) > disk_i_size) {
			/*
			 * we don't update disk_i_size now, so record this
			 * undealt i_size. Or we will not know the real
			 * i_size.
			 */
			if (test->outstanding_isize < offset)
				test->outstanding_isize = offset;
			if (ordered &&
			    ordered->outstanding_isize >
			    test->outstanding_isize)
				test->outstanding_isize =
					ordered->outstanding_isize;
			goto out;
		}
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one, and
	 * we hold the real i_size in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}
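
/*
 * Worked example (hypothetical offsets): with disk_i_size at 0 and two
 * 4K ordered extents A = [0, 4K) and B = [4K, 8K), if B finishes first
 * disk_i_size cannot move to 8K because A's data is not on disk yet, so
 * 8K is parked in A->outstanding_isize instead.  When A later finishes,
 * disk_i_size jumps straight to 8K.
 */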
/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors * sizeof(u32));

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}
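
/*
 * Why this exists: pages may be reclaimed before their checksums reach
 * the csum tree, so a read racing with an in-flight write can find the
 * checksum only here, still attached to the ordered extent.  A
 * hypothetical single-sector probe:
 *
 *	u32 csum;
 *
 *	if (btrfs_find_ordered_sum(inode, page_offset, disk_bytenr,
 *				   &csum, 1) == 1)
 *		; // use csum instead of searching the csum tree
 */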
/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod <= root->fs_info->last_trans_committed)
		return;

	spin_lock(&root->fs_info->ordered_root_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &cur_trans->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
}
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}