// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
/*
 * magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};
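/*
 * Note: log replay walks the tree once per stage, in the order the enum
 * above lists them: pin the log's blocks first so nothing about to be
 * replayed can be overwritten, then create all the inodes, then replay
 * the directory index items, and finally everything else.
 */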
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
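/*
 * For orientation, a simplified sketch of the fast path described above
 * (the exact call chain in this code base has more steps):
 *
 *	fsync(fd)
 *	  -> btrfs_sync_file()
 *	       -> btrfs_log_dentry_safe()	copy changed items into the
 *						per-subvolume log tree
 *	       -> btrfs_sync_log()		write the log tree to disk
 *
 * Only if logging fails or is not allowed do we fall back to a full
 * transaction commit.
 */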
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	int ret = 0;

	/*
	 * First check if the log root tree was already created. If not, create
	 * it before locking the root's log_mutex, just to keep lockdep happy.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret)
				set_bit(BTRFS_ROOT_HAS_LOG_TREE,
					&tree_root->state);
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (ctx && !ctx->logging_new_name) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}
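/*
 * Both helpers above operate on the page cache range that backs the
 * extent buffer: all of a buffer's pages share the mapping of pages[0],
 * so a single range write/wait covers the whole tree block.
 */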
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
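/*
 * A minimal sketch of how a walk_control is filled in, e.g. for the
 * pin-only pass done before replay (see the walk callers later in this
 * file; only the fields relevant to that pass are shown):
 *
 *	struct walk_control wc = {
 *		.pin = 1,
 *		.trans = trans,
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 */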
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
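/*
 * Worked example of the size fixup in overwrite_item(): if the log
 * carries a 160 byte item for a key that already exists in the
 * subvolume as a 120 byte item, btrfs_insert_empty_item() returns
 * -EEXIST and btrfs_extend_item() grows the destination by 40 bytes
 * before the raw copy_extent_buffer(); an oversized destination is
 * shrunk with btrfs_truncate_item() instead.
 */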
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at offset
			 * 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							fs_info->csum_root,
							sums->bytenr,
							sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
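/*
 * Note that inode_in_dir() only reports a match when both the DIR_INDEX
 * item (keyed by sequence number) and the DIR_ITEM (keyed by name hash)
 * exist and point at the expected objectid; if either is missing or
 * points elsewhere, replay treats the name as absent and redoes the
 * link.
 */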
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
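/*
 * After __add_inode_ref() returns 0, every name in the subvolume that
 * conflicts with the reference being replayed (an old name in the same
 * back ref item, a dir entry with the same sequence number, or a dir
 * entry with the same name) has been unlinked, so the caller can insert
 * the logged name without creating dangling directory entries.
 */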
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
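/*
 * Layout reminder for the two reference formats parsed by the helpers
 * above:
 *
 *	btrfs_inode_ref:    [index][name_len][name...]
 *			    key offset = parent dir objectid
 *	btrfs_inode_extref: [parent][index][name_len][name...]
 *			    key offset = btrfs_extref_hash(parent, name)
 *
 * which is why extref_get_fields() can also return the parent objectid,
 * while for old style refs the caller takes it from the key.
 */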
/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists). For any name that is not in the inode reference item from the log
 * tree, do a proper unlink of that name (that is, remove its entry from the
 * inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, BTRFS_I(inode));
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
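/*
 * A sequence where the unlink pass at the end of add_inode_ref()
 * matters (hypothetical paths, following the fsync semantics described
 * at the top of this file):
 *
 *	mv /mnt/dir/foo /mnt/dir/bar
 *	xfs_io -c fsync /mnt/dir/bar
 *	<power failure>
 *
 * The log carries an inode ref only for "bar", while the subvolume
 * still has one for "foo". Without unlink_old_inode_refs(), replay
 * would leave dir index entries for "foo" behind with no matching
 * inode reference.
 */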
static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, BTRFS_I(inode));
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			break;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
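/*
 * The "fixup dir" is not a real directory: it is the set of
 * BTRFS_ORPHAN_ITEM_KEY items stored under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID, which fixup_inode_link_counts()
 * above walks and deletes once replay has finished.
 */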
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}

	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_EXTREF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
			     name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
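/*
 * Example with made-up offsets: a dir log item with key
 * (dirid DIR_LOG_INDEX_KEY 3) whose dir_log_end is 7 makes the log
 * authoritative for index offsets 3..7 of that directory, so any
 * DIR_INDEX key in that range that exists in the subvolume but not in
 * the log is deleted by check_item_in_log() below.
 */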
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		}
		if (!log_di || log_di == ERR_PTR(-ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 BTRFS_I(inode), name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret == 1)
					break;
				else if (ret < 0)
					goto out;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen, int level)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen, level, NULL);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			/*
			 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
			 * and never got linked before the fsync, skip it, as
			 * replaying it is pointless since it would be deleted
			 * later. We skip logging tmpfiles, but it's always
			 * possible we are replaying a log created with a kernel
			 * that used to log tmpfiles.
			 */
			if (btrfs_inode_nlink(eb, inode_item) == 0) {
				wc->ignore_cur_inode = true;
				continue;
			} else {
				wc->ignore_cur_inode = false;
			}
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/*
			 * Before replaying extents, truncate the inode to its
			 * size. We need to do it now and not after log replay
			 * because before an fsync we can have prealloc extents
			 * added beyond the inode's i_size. If we did it after,
			 * through orphan cleanup for example, we would drop
			 * those prealloc extents just after replaying them.
			 */
			if (S_ISREG(mode)) {
				struct btrfs_drop_extents_args drop_args = { 0 };
				struct inode *inode;
				u64 from;

				inode = read_one_inode(root, key.objectid);
				if (!inode) {
					ret = -EIO;
					break;
				}
				from = ALIGN(i_size_read(inode),
					     root->fs_info->sectorsize);
				drop_args.start = from;
				drop_args.end = (u64)-1;
				drop_args.drop_cache = true;
				ret = btrfs_drop_extents(wc->trans, root,
							 BTRFS_I(inode),
							 &drop_args);
				if (!ret) {
					inode_sub_bytes(inode,
							drop_args.bytes_found);
					/* Update the inode's nbytes. */
					ret = btrfs_update_inode(wc->trans,
							root, BTRFS_I(inode));
				}
				iput(inode);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (wc->ignore_cur_inode)
			continue;

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * Correctly adjust the reserved bytes occupied by a log tree extent buffer
 */
static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "unable to find block group for %llu", start);
		return;
	}

	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->reserved -= fs_info->nodesize;
	cache->space_info->bytes_reserved -= fs_info->nodesize;
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	btrfs_put_block_group(cache);
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	u32 blocksize;
	int ret = 0;

	while (*level > 0) {
		struct btrfs_key first_key;

		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
		blocksize = fs_info->nodesize;

		next = btrfs_find_create_tree_block(fs_info, bytenr,
						    btrfs_header_owner(cur),
						    *level - 1);
		if (IS_ERR(next))
			return PTR_ERR(next);

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen,
					       *level - 1);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen,
							*level - 1, &first_key);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_clean_tree_block(next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
					ret = btrfs_pin_reserved_extent(trans,
							bytenr, blocksize);
					if (ret) {
						free_extent_buffer(next);
						return ret;
					}
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);
					unaccount_log_buffer(fs_info, bytenr);
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]),
				 *level);
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_clean_tree_block(next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
					ret = btrfs_pin_reserved_extent(trans,
						     path->nodes[*level]->start,
						     path->nodes[*level]->len);
					if (ret)
						return ret;
				} else {
					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
						clear_extent_buffer_dirty(next);

					unaccount_log_buffer(fs_info,
						path->nodes[*level]->start);
				}
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	atomic_inc(&log->node->refs);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]),
			 orig_level);
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_clean_tree_block(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
				ret = btrfs_pin_reserved_extent(trans,
						next->start, next->len);
				if (ret)
					goto out;
			} else {
				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
					clear_extent_buffer_dirty(next);
				unaccount_log_buffer(fs_info, next->start);
			}
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to update the item for a given subvolumes log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log,
			   struct btrfs_root_item *root_item)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
					&log->root_key, root_item);
	} else {
		ret = btrfs_update_root(trans, fs_info->log_root_tree,
					&log->root_key, root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	for (;;) {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);

		if (!(root->log_transid_committed < transid &&
		      atomic_read(&root->log_commit[index])))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_commit_wait[index], &wait);
}
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&root->log_writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&root->log_writers))
			break;

		mutex_unlock(&root->log_mutex);
		schedule();
		mutex_lock(&root->log_mutex);
	}
	finish_wait(&root->log_writer_wait, &wait);
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;
	struct btrfs_log_ctx *safe;

	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
		list_del_init(&ctx->list);
		ctx->log_ret = error;
	}

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
	struct btrfs_root_item new_root_item;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;
	u64 log_root_start;
	u64 log_root_level;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(fs_info, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(trans)) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, ret);
		btrfs_set_log_full_commit(trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	/*
	 * We _must_ update under the root->log_mutex in order to make sure we
	 * have a consistent view of the log root we are trying to commit at
	 * this moment.
	 *
	 * We _must_ copy this into a local copy, because we are not holding the
	 * log_root_tree->log_mutex yet.  This is important because when we
	 * commit the log_root_tree we must have a consistent view of the
	 * log_root_tree when we update the super block to point at the
	 * log_root_tree bytenr.  If we update the log_root_tree here we'll race
	 * with the commit and possibly point at the new block which we may not
	 * have written out.
	 */
	btrfs_set_root_node(&log->root_item, log->node);
	memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx, NULL);

	mutex_lock(&log_root_tree->log_mutex);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	/*
	 * Now we are safe to update the log_root_tree because we're under the
	 * log_mutex, and we're a current writer so we're holding the commit
	 * open until we drop the log_mutex.
	 */
	ret = update_log_root(trans, log, &new_root_item);
	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		list_del_init(&root_log_ctx.list);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		ret = btrfs_wait_tree_log_extents(log, mark);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_tree_log_extents(log, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(fs_info,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(trans);
		btrfs_abort_transaction(trans, ret);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_tree_log_extents(log, mark);
	if (!ret)
		ret = btrfs_wait_tree_log_extents(log_root_tree,
						  EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(trans);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}

	log_root_start = log_root_tree->node->start;
	log_root_level = btrfs_header_level(log_root_tree->node);
	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * Here we are guaranteed that nobody is going to write the superblock
	 * for the current transaction before us and that neither we do write
	 * our superblock before the previous transaction finishes its commit
	 * and writes its superblock, because:
	 *
	 * 1) We are holding a handle on the current transaction, so nobody
	 *    can commit it until we release the handle;
	 *
	 * 2) Before writing our superblock we acquire the tree_log_mutex, so
	 *    if the previous transaction is still committing, and hasn't yet
	 *    written its superblock, we wait for it to do it, because a
	 *    transaction commit acquires the tree_log_mutex when the commit
	 *    begins and releases it only after writing its superblock.
	 */
	mutex_lock(&fs_info->tree_log_mutex);
	btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
	btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
	ret = write_all_supers(fs_info, 1);
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret) {
		btrfs_set_log_full_commit(trans);
		btrfs_abort_transaction(trans, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	mutex_lock(&log_root_tree->log_mutex);
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
	 * all the updates above are seen by the woken threads. It might not be
	 * necessary, but proving that seems to be hard.
	 */
	cond_wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	mutex_lock(&root->log_mutex);
	btrfs_remove_all_log_ctxs(root, index1, ret);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active (in cond_wake_up) is needed so
	 * all the updates above are seen by the woken threads. It might not be
	 * necessary, but proving that seems to be hard.
	 */
	cond_wake_up(&root->log_commit_wait[index1]);
	return ret;
}
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	if (ret) {
		if (trans)
			btrfs_abort_transaction(trans, ret);
		else
			btrfs_handle_fs_error(log->fs_info, ret, NULL);
	}

	clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
	extent_io_tree_release(&log->log_csum_range);
	btrfs_put_root(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
		clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
	}
	return 0;
}
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
		clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
	}
	return 0;
}
/*
 * Check if an inode was logged in the current transaction. We can't always rely
 * on an inode's logged_trans value, because it's an in-memory only field and
 * therefore not persisted. This means that its value is lost if the inode gets
 * evicted and loaded again from disk (in which case it has a value of 0, and
 * certainly it is smaller than any possible transaction ID), when that happens
 * the full_sync flag is set in the inode's runtime flags, so on that case we
 * assume eviction happened and ignore the logged_trans value, assuming the
 * worst case, that the inode was logged before in the current transaction.
 */
static bool inode_logged(struct btrfs_trans_handle *trans,
			 struct btrfs_inode *inode)
{
	if (inode->logged_trans == trans->transid)
		return true;

	if (inode->last_trans == trans->transid &&
	    test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
	    !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
		return true;

	return false;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct btrfs_inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	u64 bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (!inode_logged(trans, dir))
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&dir->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&dir->log_mutex);
	if (err == -ENOSPC) {
		btrfs_set_log_full_commit(trans);
		err = 0;
	} else if (err < 0 && err != -ENOENT) {
		/* ENOENT can be returned if the entry hasn't been fsynced yet */
		btrfs_abort_transaction(trans, err);
	}

	btrfs_end_log_trans(root);

	return err;
}
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct btrfs_inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (!inode_logged(trans, inode))
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&inode->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&inode->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/*
	 * Find the first key from this transaction again.  See the note for
	 * log_new_dir_dentries, if we're logging a directory recursively we
	 * won't be holding its i_mutex, which means we can modify the directory
	 * while we're logging it.  If we remove an entry between our first
	 * search and this search we'll not find the key again and can just
	 * bail.
	 */
search:
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0)
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;

			if (need_resched()) {
				btrfs_release_path(path);
				cond_resched();
				goto search;
			}

			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in a fsync log that when replayed, our
			 * file inode would have a link count of 1, but we get
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which resulted
			 * always in stale file handle errors, and would not
			 * be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret) {
			if (ret == 1)
				last_offset = (u64)-1;
			else
				err = ret;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes. We find all the items
 * from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
				    ctx, min_key, &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
		if (ret < 0)
			break;

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token, leaf);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(&token, item, 0);
		btrfs_set_token_inode_size(&token, item, logged_isize);
	} else {
		btrfs_set_token_inode_generation(&token, item,
						 BTRFS_I(inode)->generation);
		btrfs_set_token_inode_size(&token, item, inode->i_size);
	}

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));

	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &inode->location, sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
			0, 0);
	btrfs_release_path(path);
	return 0;
}
static int log_csums(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode,
		     struct btrfs_root *log_root,
		     struct btrfs_ordered_sum *sums)
{
	const u64 lock_end = sums->bytenr + sums->len - 1;
	struct extent_state *cached_state = NULL;
	int ret;

	/*
	 * If this inode was not used for reflink operations in the current
	 * transaction with new extents, then do the fast path, no need to
	 * worry about logging checksum items with overlapping ranges.
	 */
	if (inode->last_reflink_trans < trans->transid)
		return btrfs_csum_file_blocks(trans, log_root, sums);

	/*
	 * Serialize logging for checksums. This is to avoid racing with the
	 * same checksum being logged by another task that is logging another
	 * file which happens to refer to the same extent as well. Such races
	 * can leave checksum items in the log with overlapping ranges.
	 */
	ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
			       lock_end, &cached_state);
	if (ret)
		return ret;
	/*
	 * Due to extent cloning, we might have logged a csum item that covers a
	 * subrange of a cloned extent, and later we can end up logging a csum
	 * item for a larger subrange of the same extent or the entire range.
	 * This would leave csum items in the log tree that cover the same range
	 * and break the searches for checksums in the log tree, resulting in
	 * some checksums missing in the fs/subvolume tree. So just delete (or
	 * trim and adjust) any existing csum items in the log for this range.
	 */
	ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
	if (!ret)
		ret = btrfs_csum_file_blocks(trans, log_root, sums);

	unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
			     &cached_state);

	return ret;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = inode->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					&inode->vfs_inode,
					inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret)
					break;
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = log_csums(trans, inode, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}
static int log_extent_csums(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode,
			    struct btrfs_root *log_root,
			    const struct extent_map *em,
			    struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *ordered;
	u64 csum_offset;
	u64 csum_len;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	if (inode->flags & BTRFS_INODE_NODATASUM ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
		const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
		const u64 mod_end = mod_start + mod_len;
		struct btrfs_ordered_sum *sums;

		if (mod_len == 0)
			break;

		if (ordered_end <= mod_start)
			continue;
		if (mod_end <= ordered->file_offset)
			continue;

		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this ordered
		 * extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered_end >= mod_end)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered_end < mod_end) {
				mod_len = mod_end - ordered_end;
				mod_start = ordered_end;
			} else {
				mod_len = 0;
			}
		}

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
			continue;

		list_for_each_entry(sums, &ordered->list, list) {
			ret = log_csums(trans, inode, log_root, sums);
			if (ret)
				return ret;
		}
	}

	/* We're done, found all csums in the ordered extents. */
	if (mod_len == 0)
		return 0;

	/* If we're compressed we have to save the entire range of csums. */
	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = log_csums(trans, inode, log_root, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;

	ret = log_extent_csums(trans, inode, log, em, ctx);
	if (ret)
		return ret;

	drop_args.path = path;
	drop_args.start = em->start;
	drop_args.end = em->start + em->len;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*fi);
	ret = btrfs_drop_extents(trans, log, inode, &drop_args);
	if (ret)
		return ret;

	if (!drop_args.extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	btrfs_init_map_token(&token, leaf);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(&token, fi,
						 BTRFS_FILE_EXTENT_PREALLOC);
	else
		btrfs_set_token_file_extent_type(&token, fi,
						 BTRFS_FILE_EXTENT_REG);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
							em->block_start);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(&token, fi,
							em->block_start -
							extent_offset);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
		btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
	}

	btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
	btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
	btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
	btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
	btrfs_set_token_file_extent_encryption(&token, fi, 0);
	btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
/*
 * Log all prealloc extents beyond the inode's i_size to make sure we do not
 * lose them after doing a fast fsync and replaying the log. We scan the
 * subvolume's root instead of iterating the inode's extent map tree because
 * otherwise we can log incorrect extent items based on extent map conversion.
 * That can happen due to the fact that extent maps are merged when they
 * are not in the extent map tree's list of modified extents.
 */
static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
				      struct btrfs_inode *inode,
				      struct btrfs_path *path)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key;
	const u64 i_size = i_size_read(&inode->vfs_inode);
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *dst_path = NULL;
	bool dropped_extents = false;
	u64 truncate_offset = i_size;
	struct extent_buffer *leaf;
	int slot;
	int ins_nr = 0;
	int start_slot;
	int ret;

	if (!(inode->flags & BTRFS_INODE_PREALLOC))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = i_size;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/*
	 * We must check if there is a prealloc extent that starts before the
	 * i_size and crosses the i_size boundary. This is to ensure later we
	 * truncate down to the end of that extent and not to the i_size, as
	 * otherwise we end up losing part of the prealloc extent after a log
	 * replay and with an implicit hole if there is another prealloc extent
	 * that starts at an offset beyond i_size.
	 */
	ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		struct btrfs_file_extent_item *ei;

		leaf = path->nodes[0];
		slot = path->slots[0];
		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) ==
		    BTRFS_FILE_EXTENT_PREALLOC) {
			u64 extent_end;

			btrfs_item_key_to_cpu(leaf, &key, slot);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, ei);

			if (extent_end > i_size)
				truncate_offset = extent_end;
		}
	} else {
		ret = 0;
	}

	while (true) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			if (ins_nr > 0) {
				ret = copy_items(trans, inode, dst_path, path,
						 start_slot, ins_nr, 1, 0);
				if (ret < 0)
					goto out;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY ||
		    key.offset < i_size) {
			path->slots[0]++;
			continue;
		}
		if (!dropped_extents) {
			/*
			 * Avoid logging extent items logged in past fsync calls
			 * and leading to duplicate keys in the log tree.
			 */
			do {
				ret = btrfs_truncate_inode_items(trans,
							 root->log_root,
							 inode, truncate_offset,
							 BTRFS_EXTENT_DATA_KEY);
			} while (ret == -EAGAIN);
			if (ret)
				goto out;
			dropped_extents = true;
		}
		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		if (!dst_path) {
			dst_path = btrfs_alloc_path();
			if (!dst_path) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
	if (ins_nr > 0)
		ret = copy_items(trans, inode, dst_path, path,
				 start_slot, ins_nr, 1, 0);
out:
	btrfs_release_path(path);
	btrfs_free_path(dst_path);
	return ret;
}
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_inode *inode,
				     struct btrfs_path *path,
				     struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_extent *tmp;
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);
		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation < trans->transid)
			continue;

		/* We log prealloc extents beyond eof later. */
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
		    em->start >= i_size_read(&inode->vfs_inode))
			continue;

		/* Need a ref to keep it from getting evicted from cache */
		refcount_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);
process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	if (!ret)
		ret = btrfs_log_prealloc_extents(trans, inode, path);
	if (ret)
		return ret;

	/*
	 * We have logged all extents successfully, now make sure the commit of
	 * the current transaction waits for the ordered extents to complete
	 * before it commits and wipes out the log trees, otherwise we would
	 * lose data if an ordered extent completes after the transaction
	 * commits and a power failure happens after the transaction commit.
	 */
	list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
		list_del_init(&ordered->log_list);
		set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);

		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			spin_lock_irq(&inode->ordered_tree.lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&inode->ordered_tree.lock);
		}
		btrfs_put_ordered_extent(ordered);
	}

	return 0;
}
static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
			     struct btrfs_path *path, u64 *size_ret)
{
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		*size_ret = 0;
	} else {
		struct btrfs_inode_item *item;

		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_item);
		*size_ret = btrfs_inode_size(path->nodes[0], item);
		/*
		 * If the in-memory inode's i_size is smaller than the inode
		 * size stored in the btree, return the inode's i_size, so
		 * that we get a correct inode size after replaying the log
		 * when before a power failure we had a shrinking truncate
		 * followed by addition of a new name (rename / new hard link).
		 * Otherwise return the inode size from the btree, to avoid
		 * data loss when replaying a log due to previously doing a
		 * write that expands the inode's size and logging a new name
		 * immediately after.
		 */
		if (*size_ret > inode->vfs_inode.i_size)
			*size_ret = inode->vfs_inode.i_size;
	}

	btrfs_release_path(path);
	return 0;
}
/*
 * At the moment we always log all xattrs. This is to figure out at log replay
 * time which xattrs must have their deletion replayed. If a xattr is missing
 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 * because if a xattr is deleted, the inode is fsynced and a power failure
 * happens, causing the log to be replayed the next time the fs is mounted,
 * we want the xattr to not exist anymore (same behaviour as other filesystems
 * with a journal, ext3/4, xfs, f2fs, etc).
 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;
	int start_slot = 0;
	bool found_xattrs = false;

	if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			if (ins_nr > 0) {
				ret = copy_items(trans, inode, dst_path, path,
						 start_slot, ins_nr, 1, 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		found_xattrs = true;
		cond_resched();
	}
	if (ins_nr > 0) {
		ret = copy_items(trans, inode, dst_path, path,
				 start_slot, ins_nr, 1, 0);
		if (ret < 0)
			return ret;
	}

	if (!found_xattrs)
		set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);

	return 0;
}
/*
 * When using the NO_HOLES feature, if we punched a hole that causes the
 * deletion of entire leafs or all the extent items of the first leaf (the one
 * that contains the inode item and references) we may end up not processing
 * any extents, because there are no leafs with a generation matching the
 * current transaction that have extent items for our inode. So we need to find
 * if any holes exist and then log them. We also need to log holes after any
 * truncate operation that changes the inode's size.
 */
static int btrfs_log_holes(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_inode *inode,
			   struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(&inode->vfs_inode);
	u64 prev_extent_end = 0;
	int ret;

	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/* We have a hole, log it. */
		if (prev_extent_end < key.offset) {
			const u64 hole_len = key.offset - prev_extent_end;

			/*
			 * Release the path to avoid deadlocks with other code
			 * paths that search the root while holding locks on
			 * leafs from the log root.
			 */
			btrfs_release_path(path);
			ret = btrfs_insert_file_extent(trans, root->log_root,
						       ino, prev_extent_end, 0,
						       0, hole_len, 0, hole_len,
						       0, 0, 0);
			if (ret < 0)
				return ret;

			/*
			 * Search for the same key again in the root. Since it's
			 * an extent item and we are holding the inode lock, the
			 * key must still exist. If it doesn't, just emit a
			 * warning and return an error to fall back to a
			 * transaction commit.
			 */
			ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
			if (ret < 0)
				return ret;
			if (WARN_ON(ret > 0))
				return -ENOENT;
			leaf = path->nodes[0];
		}

		prev_extent_end = btrfs_file_extent_end(path);
		path->slots[0]++;
		cond_resched();
	}

	if (prev_extent_end < i_size) {
		u64 hole_len;

		btrfs_release_path(path);
		hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
		ret = btrfs_insert_file_extent(trans, root->log_root,
					       ino, prev_extent_end, 0, 0,
					       hole_len, 0, hole_len,
					       0, 0, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
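
/*
 * Illustration, not part of the original sources: a userspace sketch of a
 * hole punch that removes every extent item of the file, the case
 * btrfs_log_holes() exists for (assumes a filesystem created with the
 * NO_HOLES feature and mounted at /mnt):
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[65536] = { 0 };
 *		int fd = open("/mnt/foo", O_CREAT | O_RDWR, 0644);
 *
 *		pwrite(fd, buf, sizeof(buf), 0);
 *		fsync(fd);
 *		fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			  0, sizeof(buf));	// no extent items left
 *		fsync(fd);			// must log the hole explicitly
 *		return 0;			// <power failure here>
 *	}
 */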
/*
 * When we are logging a new inode X, check whether it has a reference that
 * matches a reference of some other inode Y, created in a past transaction
 * and renamed in the current transaction. If we don't do this, then at
 * log replay time we can lose inode Y (and all its files if it's a directory):
 *
 * mkdir /mnt/x
 * echo "hello world" > /mnt/x/foobar
 * sync
 * mv /mnt/x /mnt/y
 * mkdir /mnt/x                 # or touch /mnt/x
 * xfs_io -c fsync /mnt/x
 * <power fail>
 * mount fs, trigger log replay
 *
 * After the log replay procedure, we would lose the first directory and all its
 * files (file foobar).
 * For the case where inode Y is not a directory we simply end up losing it:
 *
 * echo "123" > /mnt/foo
 * sync
 * mv /mnt/foo /mnt/bar
 * echo "abc" > /mnt/foo
 * xfs_io -c fsync /mnt/foo
 * <power fail>
 *
 * We also need this for cases where a snapshot entry is replaced by some other
 * entry (file or directory), otherwise we end up with an unreplayable log due to
 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
 * if it were a regular entry:
 *
 * mkdir /mnt/x
 * btrfs subvolume snapshot /mnt /mnt/x/snap
 * btrfs subvolume delete /mnt/x/snap
 * rmdir /mnt/x
 * mkdir /mnt/x
 * fsync /mnt/x or fsync some new file inside it
 * <power fail>
 *
 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
 * the same transaction.
 */
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
					 const int slot,
					 const struct btrfs_key *key,
					 struct btrfs_inode *inode,
					 u64 *other_ino, u64 *other_parent)
{
	int ret;
	struct btrfs_path *search_path;
	char *name = NULL;
	u32 name_len = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	u32 cur_offset = 0;
	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);

	search_path = btrfs_alloc_path();
	if (!search_path)
		return -ENOMEM;
	search_path->search_commit_root = 1;
	search_path->skip_locking = 1;

	while (cur_offset < item_size) {
		u64 parent;
		u32 this_name_len;
		u32 this_len;
		unsigned long name_ptr;
		struct btrfs_dir_item *di;

		if (key->type == BTRFS_INODE_REF_KEY) {
			struct btrfs_inode_ref *iref;

			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
			parent = key->offset;
			this_name_len = btrfs_inode_ref_name_len(eb, iref);
			name_ptr = (unsigned long)(iref + 1);
			this_len = sizeof(*iref) + this_name_len;
		} else {
			struct btrfs_inode_extref *extref;

			extref = (struct btrfs_inode_extref *)(ptr +
							       cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			this_name_len = btrfs_inode_extref_name_len(eb, extref);
			name_ptr = (unsigned long)&extref->name;
			this_len = sizeof(*extref) + this_name_len;
		}

		if (this_name_len > name_len) {
			char *new_name;

			new_name = krealloc(name, this_name_len, GFP_NOFS);
			if (!new_name) {
				ret = -ENOMEM;
				goto out;
			}
			name_len = this_name_len;
			name = new_name;
		}

		read_extent_buffer(eb, name, name_ptr, this_name_len);
		di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
				parent, name, this_name_len, 0);
		if (di && !IS_ERR(di)) {
			struct btrfs_key di_key;

			btrfs_dir_item_key_to_cpu(search_path->nodes[0],
						  di, &di_key);
			if (di_key.type == BTRFS_INODE_ITEM_KEY) {
				if (di_key.objectid != key->objectid) {
					ret = 1;
					*other_ino = di_key.objectid;
					*other_parent = parent;
				} else {
					ret = 0;
				}
			} else {
				ret = -EAGAIN;
			}
			goto out;
		} else if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		btrfs_release_path(search_path);

		cur_offset += this_len;
	}
	ret = 0;
out:
	btrfs_free_path(search_path);
	kfree(name);
	return ret;
}
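
/*
 * Illustration, not part of the original sources: the first reproducer
 * from the comment above, written as a userspace sketch (assuming a btrfs
 * mount at /mnt):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/x", 0755);
 *		sync();				// commit the transaction
 *		rename("/mnt/x", "/mnt/y");	// inode Y renamed ...
 *		mkdir("/mnt/x", 0755);		// ... and its old name reused
 *		fd = open("/mnt/x", O_RDONLY | O_DIRECTORY);
 *		fsync(fd);	// must detect the conflicting reference
 *		return 0;	// <power failure here>
 *	}
 */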
struct btrfs_ino_list {
	u64 ino;
	u64 parent;
	struct list_head list;
};
static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_log_ctx *ctx,
				  u64 ino, u64 parent)
{
	struct btrfs_ino_list *ino_elem;
	LIST_HEAD(inode_list);
	int ret = 0;

	ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
	if (!ino_elem)
		return -ENOMEM;
	ino_elem->ino = ino;
	ino_elem->parent = parent;
	list_add_tail(&ino_elem->list, &inode_list);

	while (!list_empty(&inode_list)) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct btrfs_key key;
		struct inode *inode;

		ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
					    list);
		ino = ino_elem->ino;
		parent = ino_elem->parent;
		list_del(&ino_elem->list);
		kfree(ino_elem);
		if (ret)
			continue;

		btrfs_release_path(path);

		inode = btrfs_iget(fs_info->sb, ino, root);
		/*
		 * If the other inode that had a conflicting dir entry was
		 * deleted in the current transaction, we need to log its parent
		 * directory.
		 */
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			if (ret == -ENOENT) {
				inode = btrfs_iget(fs_info->sb, parent, root);
				if (IS_ERR(inode)) {
					ret = PTR_ERR(inode);
				} else {
					ret = btrfs_log_inode(trans, root,
						      BTRFS_I(inode),
						      LOG_OTHER_INODE_ALL,
						      ctx);
					btrfs_add_delayed_iput(inode);
				}
			}
			continue;
		}
		/*
		 * If the inode was already logged skip it - otherwise we can
		 * hit an infinite loop. Example:
		 *
		 * From the commit root (previous transaction) we have the
		 * following inodes:
		 *
		 * inode 257 a directory
		 * inode 258 with references "zz" and "zz_link" on inode 257
		 * inode 259 with reference "a" on inode 257
		 *
		 * And in the current (uncommitted) transaction we have:
		 *
		 * inode 257 a directory, unchanged
		 * inode 258 with references "a" and "a2" on inode 257
		 * inode 259 with reference "zz_link" on inode 257
		 * inode 261 with reference "zz" on inode 257
		 *
		 * When logging inode 261 the following infinite loop could
		 * happen if we don't skip already logged inodes:
		 *
		 * - we detect inode 258 as a conflicting inode, with inode 261
		 *   on reference "zz", and log it;
		 *
		 * - we detect inode 259 as a conflicting inode, with inode 258
		 *   on reference "a", and log it;
		 *
		 * - we detect inode 258 as a conflicting inode, with inode 259
		 *   on reference "zz_link", and log it - again! After this we
		 *   repeat the above steps forever.
		 */
		spin_lock(&BTRFS_I(inode)->lock);
		/*
		 * Check the inode's logged_trans only instead of
		 * btrfs_inode_in_log(). This is because the last_log_commit of
		 * the inode is not updated when we only log that it exists and
		 * it has the full sync bit set (see btrfs_log_inode()).
		 */
		if (BTRFS_I(inode)->logged_trans == trans->transid) {
			spin_unlock(&BTRFS_I(inode)->lock);
			btrfs_add_delayed_iput(inode);
			continue;
		}
		spin_unlock(&BTRFS_I(inode)->lock);
		/*
		 * We are safe logging the other inode without acquiring its
		 * lock as long as we log with the LOG_INODE_EXISTS mode. We
		 * are safe against concurrent renames of the other inode as
		 * well because during a rename we pin the log and update the
		 * log with the new name before we unpin it.
		 */
		ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
				      LOG_OTHER_INODE, ctx);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			continue;
		}

		key.objectid = ino;
		key.type = BTRFS_INODE_REF_KEY;
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			btrfs_add_delayed_iput(inode);
			continue;
		}

		while (true) {
			struct extent_buffer *leaf = path->nodes[0];
			int slot = path->slots[0];
			u64 other_ino = 0;
			u64 other_parent = 0;

			if (slot >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0) {
					break;
				} else if (ret > 0) {
					ret = 0;
					break;
				}
				continue;
			}

			btrfs_item_key_to_cpu(leaf, &key, slot);
			if (key.objectid != ino ||
			    (key.type != BTRFS_INODE_REF_KEY &&
			     key.type != BTRFS_INODE_EXTREF_KEY)) {
				ret = 0;
				break;
			}

			ret = btrfs_check_ref_name_override(leaf, slot, &key,
					BTRFS_I(inode), &other_ino,
					&other_parent);
			if (ret < 0)
				break;
			if (ret > 0) {
				ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
				if (!ino_elem) {
					ret = -ENOMEM;
					break;
				}
				ino_elem->ino = other_ino;
				ino_elem->parent = other_parent;
				list_add_tail(&ino_elem->list, &inode_list);
				ret = 0;
			}
			path->slots[0]++;
		}
		btrfs_add_delayed_iput(inode);
	}

	return ret;
}
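
/*
 * Illustration, not part of the original sources: the loop above avoids
 * recursion by using a list as a FIFO work queue. A standalone sketch of
 * the same pattern with hypothetical types (handle() stands in for logging
 * one conflicting inode and may enqueue more work, just like newly found
 * conflicting inodes are appended to inode_list above):
 *
 *	#include <stdlib.h>
 *
 *	struct work {
 *		unsigned long long ino;
 *		struct work *next;
 *	};
 *
 *	void handle(unsigned long long ino,
 *		    struct work **head, struct work **tail);
 *
 *	static void process_all(struct work **head, struct work **tail)
 *	{
 *		while (*head) {
 *			struct work *w = *head;
 *
 *			*head = w->next;
 *			if (!*head)
 *				*tail = NULL;
 *			handle(w->ino, head, tail);	// may append entries
 *			free(w);
 *		}
 *	}
 */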
static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *inode,
				   struct btrfs_key *min_key,
				   const struct btrfs_key *max_key,
				   struct btrfs_path *path,
				   struct btrfs_path *dst_path,
				   const u64 logged_isize,
				   const bool recursive_logging,
				   const int inode_only,
				   struct btrfs_log_ctx *ctx,
				   bool *need_log_inode_item)
{
	struct btrfs_root *root = inode->root;
	int ins_start_slot = 0;
	int ins_nr = 0;
	int ret;

	while (1) {
		ret = btrfs_search_forward(root, min_key, path, trans->transid);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			ret = 0;
			break;
		}
again:
		/* Note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key->objectid != max_key->objectid)
			break;
		if (min_key->type > max_key->type)
			break;

		if (min_key->type == BTRFS_INODE_ITEM_KEY)
			*need_log_inode_item = false;

		if ((min_key->type == BTRFS_INODE_REF_KEY ||
		     min_key->type == BTRFS_INODE_EXTREF_KEY) &&
		    inode->generation == trans->transid &&
		    !recursive_logging) {
			u64 other_ino = 0;
			u64 other_parent = 0;

			ret = btrfs_check_ref_name_override(path->nodes[0],
					path->slots[0], min_key, inode,
					&other_ino, &other_parent);
			if (ret < 0) {
				return ret;
			} else if (ret > 0 && ctx &&
				   other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
				if (ins_nr > 0) {
					ins_nr++;
				} else {
					ins_nr = 1;
					ins_start_slot = path->slots[0];
				}
				ret = copy_items(trans, inode, dst_path, path,
						 ins_start_slot, ins_nr,
						 inode_only, logged_isize);
				if (ret < 0)
					return ret;
				ins_nr = 0;

				ret = log_conflicting_inodes(trans, root, path,
						ctx, other_ino, other_parent);
				if (ret)
					return ret;
				btrfs_release_path(path);
				goto next_key;
			}
		}

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0)
				return ret;
			ins_nr = 0;
			goto next_slot;
		}

		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
				 ins_nr, inode_only, logged_isize);
		if (ret < 0)
			return ret;
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:
		path->slots[0]++;
		if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
			btrfs_item_key_to_cpu(path->nodes[0], min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 ins_start_slot, ins_nr, inode_only,
					 logged_isize);
			if (ret < 0)
				return ret;
			ins_nr = 0;
		}
		btrfs_release_path(path);
next_key:
		if (min_key->offset < (u64)-1) {
			min_key->offset++;
		} else if (min_key->type < max_key->type) {
			min_key->type++;
			min_key->offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr)
		ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
				 ins_nr, inode_only, logged_isize);

	return ret;
}
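
/*
 * Illustration, not part of the original sources: ins_start_slot/ins_nr
 * above implement simple run batching - consecutive leaf slots accumulate
 * and are copied with a single copy_items() call. A standalone sketch of
 * the idea, with a hypothetical copy_range(first, count) standing in for
 * copy_items():
 *
 *	void copy_range(int first, int count);
 *
 *	static void copy_batched(const int *slots, int n)
 *	{
 *		int start = 0;	// index of the first slot of the run
 *		int len = 0;	// length of the current run
 *
 *		for (int i = 0; i < n; i++) {
 *			if (len && slots[i] == slots[start] + len) {
 *				len++;			// run continues
 *				continue;
 *			}
 *			if (len)
 *				copy_range(slots[start], len);	// flush run
 *			start = i;
 *			len = 1;
 *		}
 *		if (len)
 *			copy_range(slots[start], len);	// final flush
 *	}
 */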
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree. An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	int err = 0;
	int ret;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;
	bool xattrs_logged = false;
	bool recursive_logging = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->vfs_inode.i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &inode->runtime_flags) &&
	     inode_only >= LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * Only run delayed items if we are a directory. We want to make sure
	 * all directory indexes hit the fs/subvolume tree so we can find them
	 * and figure out which index ranges have to be logged.
	 *
	 * Otherwise commit the delayed inode only if the full sync flag is set,
	 * as we want to make sure an up to date version is in the subvolume
	 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
	 * it to the log tree. For a non full sync, we always log the inode item
	 * based on the in-memory struct btrfs_inode which is always up to date.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode))
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
		ret = btrfs_commit_inode_delayed_inode(inode);
	else
		ret = 0;

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
		recursive_logging = true;
		if (inode_only == LOG_OTHER_INODE)
			inode_only = LOG_INODE_EXISTS;
		else
			inode_only = LOG_INODE_ALL;
		mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&inode->log_mutex);
	}

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->vfs_inode.i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync the log), power fail -
			 * if we use the inode's current i_size, after log
			 * replay we get an 8K file, with the last 4K extent as
			 * a hole (zeroes), as if an expanding truncate
			 * happened, instead of getting a file of 4K only.
			 */
			err = logged_inode_size(log, inode, path, &logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &inode->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &inode->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &inode->runtime_flags);
				while (1) {
					ret = btrfs_truncate_inode_items(trans,
						log, &inode->vfs_inode, 0, 0);
					if (ret != -EAGAIN)
						break;
				}
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &inode->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
				      path, dst_path, logged_isize,
				      recursive_logging, inode_only, ctx,
				      &need_log_inode_item);
	if (err)
		goto out_unlock;

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	xattrs_logged = true;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_holes(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (!err && !xattrs_logged) {
			err = btrfs_log_all_xattrs(trans, root, inode, path,
						   dst_path);
			btrfs_release_path(path);
		}
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
			list_del_init(&em->list);
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	/*
	 * If we are logging that an ancestor inode exists as part of logging a
	 * new name from a link or rename operation, don't mark the inode as
	 * logged - otherwise if an explicit fsync is made against an ancestor,
	 * the fsync considers the inode in the log and doesn't sync the log,
	 * resulting in the ancestor missing after a power failure unless the
	 * log was synced as part of an fsync against any other unrelated inode.
	 * So keep it simple for this case and just don't flag the ancestors as
	 * logged.
	 */
	if (!ctx ||
	    !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name &&
	      &inode->vfs_inode != ctx->inode)) {
		spin_lock(&inode->lock);
		inode->logged_trans = trans->transid;
		/*
		 * Don't update last_log_commit if we logged that an inode exists
		 * after it was loaded to memory (full_sync bit set).
		 * This is to prevent data loss when we do a write to the inode,
		 * then the inode gets evicted after all delalloc was flushed,
		 * then we log it exists (due to a rename for example) and then
		 * fsync it. This last fsync would do nothing (not logging the
		 * extents previously written).
		 */
		if (inode_only != LOG_INODE_EXISTS ||
		    !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
			inode->last_log_commit = inode->last_sub_trans;
		spin_unlock(&inode->lock);
	}
out_unlock:
	mutex_unlock(&inode->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
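
/*
 * Illustration, not part of the original sources: the expanding truncate
 * hazard described in the LOG_INODE_EXISTS branch above, as a userspace
 * sketch (paths assumed). Without logged_inode_size(), replay could leave
 * foo with an 8K size but only 4K of durable data:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096] = { 0 };
 *		int fd = open("/mnt/foo", O_CREAT | O_WRONLY, 0644);
 *		int fd2 = open("/mnt/other", O_CREAT | O_WRONLY, 0644);
 *
 *		pwrite(fd, buf, 4096, 0);
 *		fsync(fd);			// foo fully logged, 4K size
 *		pwrite(fd, buf, 4096, 4096);	// dirty data, never fsync'ed
 *		link("/mnt/foo", "/mnt/foo2");	// logs foo's existence only
 *		fsync(fd2);			// syncs the log
 *		return 0;			// <power failure here>
 *	}
 */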
/*
 * Check if we must fallback to a transaction commit when logging an inode.
 * This must be called after logging the inode and is used only in the context
 * when fsyncing an inode requires the need to log some other inode - in which
 * case we can't lock the i_mutex of each other inode we need to log as that
 * can lead to deadlocks with concurrent fsync against other inodes (as we can
 * log inodes up or down in the hierarchy) or rename operations for example.
 *
 * So we take the log_mutex of the inode after we have logged it and then
 * check its last_unlink_trans value. This is safe because any task setting
 * last_unlink_trans must take the log_mutex and it must do this before it
 * does the actual unlink operation. Therefore, if we do this check before a
 * concurrent task sets last_unlink_trans, it means we've logged a consistent
 * version/state of all the inode items; otherwise we are not sure and must
 * do a transaction commit (the concurrent task might have only updated
 * last_unlink_trans before we logged the inode or it might have also done
 * the unlink).
 */
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
					  struct btrfs_inode *inode)
{
	bool ret = false;

	mutex_lock(&inode->log_mutex);
	if (inode->last_unlink_trans >= trans->transid) {
		/*
		 * Make sure any commits to the log are forced to be full
		 * commits.
		 */
		btrfs_set_log_full_commit(trans);
		ret = true;
	}
	mutex_unlock(&inode->log_mutex);

	return ret;
}
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct btrfs_inode *inode,
					       struct dentry *parent,
					       struct super_block *sb)
{
	int ret = 0;
	struct dentry *old_parent = NULL;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation < trans->transid &&
	    inode->last_unlink_trans < trans->transid)
		goto out;

	if (!S_ISDIR(inode->vfs_inode.i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			goto out;
		inode = BTRFS_I(d_inode(parent));
	}

	while (1) {
		if (btrfs_must_commit_transaction(trans, inode)) {
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		if (IS_ROOT(parent)) {
			inode = BTRFS_I(d_inode(parent));
			if (btrfs_must_commit_transaction(trans, inode))
				ret = 1;
			break;
		}

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = BTRFS_I(d_inode(parent));
	}
	dput(old_parent);
out:
	return ret;
}
struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};
/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *           ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 *    that while logging the inode new references (names) are added or removed
 *    from the inode, leaving the logged inode item with a link count that does
 *    not match the number of logged inode reference items. This is fine because
 *    at log replay time we compute the real number of links and correct the
 *    link count in the inode item (see replay_one_buffer() and
 *    link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
 *    BTRFS_DIR_INDEX_KEY are added to the fs/subvol tree and the logged inode
 *    item has a size that doesn't match the sum of the lengths of all the
 *    logged names. This does not result in a problem because if a dir_item key
 *    is logged but its matching dir_index key is not logged, at log replay time
 *    we don't use it to replay the respective name (see replay_one_name()). On
 *    the other hand if only the dir_index key ends up being logged, the
 *    respective name is added to the fs/subvol tree with both the dir_item and
 *    dir_index keys created (see replay_one_name()).
 *    The directory's inode item with a wrong i_size is not a problem either,
 *    since we don't use it at log replay time to set the i_size in the inode
 *    item of the fs/subvol tree (see overwrite_item()).
 */
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			btrfs_release_path(path);
			di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
				btrfs_add_delayed_iput(di_inode);
				break;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
				log_mode = LOG_INODE_ALL;
			ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
					      log_mode, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
				ret = 1;
			btrfs_add_delayed_iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
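
/*
 * Illustration, not part of the original sources: why the walk above must
 * recurse into new directory dentries, as a userspace sketch (paths
 * assumed). Fsyncing only "a" is expected to make "a/b" and "a/b/c"
 * survive a power failure too:
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/a", 0755);
 *		mkdir("/mnt/a/b", 0755);
 *		mkdir("/mnt/a/b/c", 0755);
 *		fd = open("/mnt/a", O_RDONLY | O_DIRECTORY);
 *		fsync(fd);	// logs a, then b and c via dir_list
 *		return 0;	// <power failure here>
 *	}
 */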
static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
					       root);
			/*
			 * If the parent inode was deleted, return an error to
			 * fallback to a transaction commit. This prevents an
			 * inode that was moved from a parent A to a parent B,
			 * whose former parent A got deleted and the inode then
			 * fsync'ed, from existing at both parents after a log
			 * replay (with the old parent still existing).
			 * Example:
			 *
			 * mkdir /mnt/A
			 * mkdir /mnt/B
			 * touch /mnt/B/bar
			 * sync
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 * fsync /mnt/B/bar
			 * <power fail>
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, ctx);
			if (!ret &&
			    btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
				ret = 1;
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
						   BTRFS_I(dir_inode), ctx);
			btrfs_add_delayed_iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
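
/*
 * Illustration, not part of the original sources: the reproducer from the
 * comment above as a userspace sketch (assuming a btrfs mount at /mnt).
 * The plain rename() over the now empty B is the "mv -T" step:
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/A", 0755);
 *		mkdir("/mnt/B", 0755);
 *		creat("/mnt/B/bar", 0644);
 *		sync();
 *		rename("/mnt/B/bar", "/mnt/A/bar");
 *		rename("/mnt/A", "/mnt/B");	// old parent B is deleted
 *		fd = open("/mnt/B/bar", O_RDONLY);
 *		fsync(fd);	// must fall back to a transaction commit
 *		return 0;	// <power failure here>
 *	}
 */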
static int log_new_ancestors(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_log_ctx *ctx)
{
	struct btrfs_key found_key;

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	while (true) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key search_key;
		struct inode *inode;
		u64 ino;
		int ret = 0;

		btrfs_release_path(path);

		ino = found_key.offset;

		search_key.objectid = found_key.offset;
		search_key.type = BTRFS_INODE_ITEM_KEY;
		search_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, ino, root);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		if (BTRFS_I(inode)->generation >= trans->transid)
			ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
					      LOG_INODE_EXISTS, ctx);
		btrfs_add_delayed_iput(inode);
		if (ret)
			return ret;

		if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
			break;

		search_key.type = BTRFS_INODE_REF_KEY;
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		if (ret < 0)
			return ret;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				return -ENOENT;
			leaf = path->nodes[0];
			slot = path->slots[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != search_key.objectid ||
		    found_key.type != BTRFS_INODE_REF_KEY)
			return -ENOENT;
	}
	return 0;
}
static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct dentry *old_parent = NULL;
	struct super_block *sb = inode->vfs_inode.i_sb;
	int ret = 0;

	while (true) {
		if (!parent || d_really_is_negative(parent) ||
		    sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation >= trans->transid) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS, ctx);
			if (ret)
				break;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	dput(old_parent);

	return ret;
}
static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct dentry *parent,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret;

	/*
	 * For a single hard link case, go through a fast path that does not
	 * need to iterate the fs/subvolume tree.
	 */
	if (inode->vfs_inode.i_nlink < 2)
		return log_new_ancestors_fast(trans, inode, parent, ctx);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0)
		path->slots[0]++;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key found_key;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != ino ||
		    found_key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		/*
		 * Don't deal with extended references because they are rare
		 * cases and too complex to deal with (we would need to keep
		 * track of which subitem we are processing for each item in
		 * this loop, etc). So just return some error to fallback to
		 * a transaction commit.
		 */
		if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = -EMLINK;
			goto out;
		}

		/*
		 * Logging ancestors needs to do more searches on the fs/subvol
		 * tree, so it releases the path as needed to avoid deadlocks.
		 * Keep track of the last inode ref key and resume from that key
		 * after logging all new ancestors for the current hard link.
		 */
		memcpy(&search_key, &found_key, sizeof(search_key));

		ret = log_new_ancestors(trans, root, path, ctx);
		if (ret)
			goto out;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
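
/*
 * Illustration, not part of the original sources: the multi hard link case
 * that forces the slow path above, as a userspace sketch (paths assumed).
 * The file has links under two different new directories and the ancestors
 * of every link must be logged:
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/d1", 0755);
 *		mkdir("/mnt/d2", 0755);
 *		creat("/mnt/d1/foo", 0644);
 *		link("/mnt/d1/foo", "/mnt/d2/foo");	// i_nlink == 2
 *		fd = open("/mnt/d1/foo", O_RDWR);
 *		fsync(fd);	// both d1 and d2 are new ancestors
 *		return 0;	// <power failure here>
 *	}
 */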
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. Minimal, inode-and-backref-only
 * logging is done for any parent directories that are older than the last
 * committed transaction.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct super_block *sb;
	int ret = 0;
	bool log_dentries = false;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation < trans->transid &&
	    inode->last_unlink_trans < trans->transid) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in the old parent directory being
	 * impossible to delete after log replay (rmdir will always fail with
	 * error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the inode file with name foo3
	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
	 * and has a link count of 2.
	 */
	if (inode->last_unlink_trans >= trans->transid) {
		ret = btrfs_log_all_parents(trans, inode, ctx);
		if (ret)
			goto end_trans;
	}

	ret = log_all_new_ancestors(trans, inode, parent, ctx);
	if (ret)
		goto end_trans;

	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, inode, ctx);
	else
		ret = 0;
end_trans:
	if (ret < 0) {
		btrfs_set_log_full_commit(trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
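
/*
 * Illustration, not part of the original sources: Example 1 from the
 * comment above as a userspace sketch (paths assumed):
 *
 *	#include <fcntl.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd;
 *
 *		mkdir("/mnt/testdir", 0755);
 *		creat("/mnt/testdir/foo", 0644);
 *		link("/mnt/testdir/foo", "/mnt/testdir/bar");
 *		sync();
 *		unlink("/mnt/testdir/bar");
 *		fd = open("/mnt/testdir/foo", O_RDONLY);
 *		fsync(fd);	// triggers btrfs_log_all_parents()
 *		return 0;	// <power failure here>
 *	}
 */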
/*
 * it is not safe to log dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_tree_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
						   true);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);

			/*
			 * We didn't find the subvol, likely because it was
			 * deleted. This is ok, simply skip this log and go to
			 * the next one.
			 *
			 * We need to exclude the root because we can't have
			 * other log replays overwriting this log as we'll read
			 * it back in a few more times. This will keep our
			 * block from being modified, and we'll just bail for
			 * each subsequent pass.
			 */
			if (ret == -ENOENT)
				ret = btrfs_pin_extent_for_log_replay(trans,
							log->node->start,
							log->node->len);
			btrfs_put_root(log);

			if (!ret)
				goto next;
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of fs roots probably has changed in case
			 * some inode_item's got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * could only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		wc.replay_dest->log_root = NULL;
		btrfs_put_root(wc.replay_dest);
		btrfs_put_root(log);

		if (ret)
			goto error;
next:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_put_root(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_free_path(path);
	return ret;
}
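
/*
 * Illustration, not part of the original sources: the recovery above walks
 * every log tree once per stage, advancing wc.stage between passes. A
 * standalone sketch of the same driver loop, with hypothetical walk() and
 * commit() and stage values mirroring the LOG_WALK_* constants:
 *
 *	enum { PIN_ONLY, REPLAY_INODES, REPLAY_DIR_INDEX, REPLAY_ALL };
 *
 *	int walk(int stage);	// walk all log trees once at this stage
 *	int commit(void);	// commit the transaction, unpinning blocks
 *
 *	static int recover(void)
 *	{
 *		int stage = PIN_ONLY;
 *
 *		for (;;) {
 *			int ret = walk(stage);
 *
 *			if (ret)
 *				return ret;
 *			if (stage == REPLAY_ALL)
 *				break;
 *			stage++;	// pin -> inodes -> dir index -> all
 *		}
 *		return commit();
 *	}
 */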
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 */
void btrfs_log_new_name(struct btrfs_trans_handle *trans,
			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
			struct dentry *parent)
{
	struct btrfs_log_ctx ctx;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans < trans->transid &&
	    (!old_dir || old_dir->logged_trans < trans->transid))
		return;

	btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
	ctx.logging_new_name = true;
	/*
	 * We don't care about the return value. If we fail to log the new name
	 * then we know the next attempt to sync the log will fallback to a full
	 * transaction commit (due to a call to btrfs_set_log_full_commit()), so
	 * we don't need to worry about getting a log committed that has an
	 * inconsistent state after a rename operation.
	 */
	btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
}