/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * rename foo/some_dir foo2/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
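/*
 * The stages above are walked in increasing order; replay_one_buffer()
 * below compares wc->stage against these constants, so their numeric
 * ordering (pin < inodes < dir index < all) is significant.
 */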
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&root->fs_info->tree_log_mutex);
		if (!root->fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, root->fs_info);
		mutex_unlock(&root->fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}
out:
	mutex_unlock(&root->log_mutex);
	return ret;
}
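/*
 * Note: at most two log transactions can be in flight at once, which is
 * why per-transaction state (log_ctxs here, log_commit_wait in
 * wait_log_commit() below) is indexed by log_transid % 2.
 */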
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};
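/*
 * For illustration, a pin-only walk at log replay time would be set up
 * roughly like so (a sketch, not a call site in this excerpt):
 *
 *	struct walk_control wc = {
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *		.pin = 1,
 *	};
 *	ret = walk_log_tree(trans, log_root, &wc);
 */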
/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						      eb->start, eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(log, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
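/*
 * overwrite_item() returns 0 both when the item was copied and when the
 * destination already held identical bytes; callers cannot distinguish
 * the two cases and are not expected to.
 */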
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(root, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;

				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans,
						      root->fs_info->csum_root,
						      sums->bytenr,
						      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						      root->fs_info->csum_root,
						      sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, root);
out:
	kfree(name);
	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
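/*
 * inode_in_dir() returns 1 only when both the dir index item and the dir
 * item exist and point at 'objectid'; any mismatch or missing entry yields
 * 0, which sends the caller through conflict resolution.
 */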
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
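/*
 * Callers build the search key differently for the two ref flavours: for
 * BTRFS_INODE_REF_KEY the offset is the parent directory's objectid, while
 * for BTRFS_INODE_EXTREF_KEY it is btrfs_extref_hash(dirid, name, name_len).
 * See name_in_log_ref() below for both patterns in one place.
 */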
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, root);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans, root);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			if (ret)
				return ret;
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
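/*
 * Both ref_get_fields() and extref_get_fields() allocate *name with
 * kmalloc(); on a 0 return the caller owns the buffer and must kfree() it.
 */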
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}
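/*
 * The orphan items inserted here live under BTRFS_TREE_LOG_FIXUP_OBJECTID
 * and are consumed (and deleted) by fixup_inode_link_counts() above once
 * the rest of replay has finished.
 */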
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(dir, dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to. Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for this
		 * directory entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir. This would
		 * make it impossible to ever delete the parent directory as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
			ret = 0;
		}
	}
	btrfs_free_path(fixup_path);
	return ret;
}
/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
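/*
 * Example: a dir log item with key (dirid, BTRFS_DIR_LOG_INDEX_KEY, 2)
 * whose btrfs_dir_log_end() is 5 says the log is authoritative for index
 * keys 2 through 5 of that directory, so find_dir_range() would hand that
 * span back through *start_ret and *end_ret.
 */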
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);

		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
						     log_path,
						     dir_key->objectid,
						     dir_key->offset,
						     name, name_len, 0);
		} else {
			log_di = NULL;
		}
		if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			if (ret) {
				kfree(name);
				iput(inode);
				goto out;
			}

			inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			if (!ret)
				ret = btrfs_run_delayed_items(trans, root);
			kfree(name);
			iput(inode);
			if (ret)
				goto out;

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		} else if (IS_ERR(log_di)) {
			kfree(name);
			return PTR_ERR(log_di);
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}
static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_root *log,
				struct btrfs_path *path,
				const u64 ino)
{
	struct btrfs_key search_key;
	struct btrfs_path *log_path;
	int i;
	int nritems;
	int ret;

	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_XATTR_ITEM_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
process_leaf:
	nritems = btrfs_header_nritems(path->nodes[0]);
	for (i = path->slots[0]; i < nritems; i++) {
		struct btrfs_key key;
		struct btrfs_dir_item *di;
		struct btrfs_dir_item *log_di;
		u32 total_size;
		u32 cur;

		btrfs_item_key_to_cpu(path->nodes[0], &key, i);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
		total_size = btrfs_item_size_nr(path->nodes[0], i);
		cur = 0;
		while (cur < total_size) {
			u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
			u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
			u32 this_len = sizeof(*di) + name_len + data_len;
			char *name;

			name = kmalloc(name_len, GFP_NOFS);
			if (!name) {
				ret = -ENOMEM;
				goto out;
			}
			read_extent_buffer(path->nodes[0], name,
					   (unsigned long)(di + 1), name_len);

			log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
						    name, name_len, 0);
			btrfs_release_path(log_path);
			if (!log_di) {
				/* Doesn't exist in log tree, so delete it. */
				btrfs_release_path(path);
				di = btrfs_lookup_xattr(trans, root, path, ino,
							name, name_len, -1);
				kfree(name);
				if (IS_ERR(di)) {
					ret = PTR_ERR(di);
					goto out;
				}
				ASSERT(di);
				ret = btrfs_delete_one_dir_name(trans, root,
								path, di);
				if (ret)
					goto out;
				btrfs_release_path(path);
				search_key = key;
				goto again;
			}
			kfree(name);
			if (IS_ERR(log_di)) {
				ret = PTR_ERR(log_di);
				goto out;
			}
			cur += this_len;
			di = (struct btrfs_dir_item *)((char *)di + this_len);
		}
	}
	ret = btrfs_next_leaf(root, path);
	if (ret > 0)
		ret = 0;
	else if (ret == 0)
		goto process_leaf;
out:
	btrfs_free_path(log_path);
	btrfs_release_path(path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;

			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			if (ret)
				goto out;
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			ret = replay_xattr_deletes(wc->trans, root, log,
						   path, key.objectid);
			if (ret)
				break;
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				if (ret)
					break;
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;

			/* for regular files, make sure corresponding
			 * orphan item exist. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				if (ret)
					break;
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			if (ret)
				break;
		}

		if (key.type == BTRFS_DIR_INDEX_KEY &&
		    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}

		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_INODE_REF_KEY ||
			   key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			if (ret && ret != -ENOENT)
				break;
			ret = 0;
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			if (ret)
				break;
		} else if (key.type == BTRFS_DIR_ITEM_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			if (ret)
				break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
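/*
 * replay_one_buffer() is the process_func used while replaying a log root;
 * the pin, write and wait walks done at commit and free time use
 * process_one_buffer() instead.
 */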
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = root->nodesize;

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret) {
				free_extent_buffer(next);
				return ret;
			}

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root->fs_info,
							 next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;

			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				if (trans) {
					btrfs_tree_lock(next);
					btrfs_set_lock_blocking(next);
					clean_tree_block(trans, root->fs_info,
							 next);
					btrfs_wait_tree_block_writeback(next);
					btrfs_tree_unlock(next);
				}

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				if (ret)
					return ret;
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}
/*
 * drop the reference count on the tree rooted at 'log'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			if (trans) {
				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, log->fs_info, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);
			}

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log,
							next->start,
							next->len);
			if (ret)
				goto out;
		}
	}

out:
	btrfs_free_path(path);
	return ret;
}
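
/*
 * Illustrative sketch, not part of the original file: a condensed version
 * of the stage loop in btrfs_recover_log_trees() showing how the
 * LOG_WALK_* stages drive repeated walks of one log tree.  Blocks are
 * pinned before any inode is recreated, and directory items are only
 * replayed once every inode they reference exists in the subvolume.  The
 * real driver also iterates over all the log roots in the log root tree
 * and handles error cleanup, which is omitted here.
 */
static int __maybe_unused replay_log_tree_sketch(struct btrfs_trans_handle *trans,
						 struct btrfs_root *log,
						 struct btrfs_root *replay_dest)
{
	struct walk_control wc = {
		.stage = LOG_WALK_PIN_ONLY,
		.trans = trans,
		.replay_dest = replay_dest,
		.pin = 1,	/* record log extents so they are not reused */
	};
	int ret = 0;

	/* one full tree walk per stage, in increasing stage order */
	while (!ret && wc.stage <= LOG_WALK_REPLAY_ALL) {
		wc.process_func = (wc.stage == LOG_WALK_PIN_ONLY) ?
				  process_one_buffer : replay_one_buffer;
		ret = walk_log_tree(trans, log, &wc);
		wc.stage++;
	}
	return ret;
}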
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}
static void wait_log_commit(struct btrfs_root *root, int transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->log_transid_committed < transid &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid_committed < transid &&
		 atomic_read(&root->log_commit[index]));
}
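
/*
 * Illustrative example, not part of the original file (hypothetical
 * transids): with the two-element log_commit[]/log_commit_wait[] arrays,
 * log transactions 7 and 8 can be in flight at the same time.  A waiter
 * for transid 7 computes
 *
 *	int index = 7 % 2;	/* == 1 */
 *
 * sleeps on root->log_commit_wait[1], and is done once
 * root->log_transid_committed reaches 7 or slot 1 is no longer marked as
 * committing.
 */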
static void wait_for_writer(struct btrfs_root *root)
{
	DEFINE_WAIT(wait);

	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (atomic_read(&root->log_writers))
			schedule();
		finish_wait(&root->log_writer_wait, &wait);
		mutex_lock(&root->log_mutex);
	}
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
	if (!ctx)
		return;

	mutex_lock(&root->log_mutex);
	list_del_init(&ctx->list);
	mutex_unlock(&root->log_mutex);
}
/*
 * Invoked in log mutex context, or be sure there is no other task which
 * can access the list.
 */
static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
					     int index, int error)
{
	struct btrfs_log_ctx *ctx;

	if (!error) {
		INIT_LIST_HEAD(&root->log_ctxs[index]);
		return;
	}

	list_for_each_entry(ctx, &root->log_ctxs[index], list)
		ctx->log_ret = error;

	INIT_LIST_HEAD(&root->log_ctxs[index]);
}
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root, struct btrfs_log_ctx *ctx)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	int log_transid = 0;
	struct btrfs_log_ctx root_log_ctx;
	struct blk_plug plug;

	mutex_lock(&root->log_mutex);
	log_transid = ctx->log_transid;
	if (root->log_transid_committed >= log_transid) {
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}

	index1 = log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(root, log_transid);
		mutex_unlock(&root->log_mutex);
		return ctx->log_ret;
	}
	ASSERT(log_transid == root->log_transid);
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(root, log_transid - 1);

	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) &&
		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	blk_start_plug(&plug);
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		blk_finish_plug(&plug);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		btrfs_set_log_full_commit(root->fs_info, trans);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. new modifications of the log will be written to
	 * new positions. so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	btrfs_init_log_ctx(&root_log_ctx);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);

	index2 = log_root_tree->log_transid % 2;
	list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
	root_log_ctx.log_transid = log_root_tree->log_transid;

	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (!list_empty(&root_log_ctx.list))
			list_del_init(&root_log_ctx.list);

		blk_finish_plug(&plug);
		btrfs_set_log_full_commit(root->fs_info, trans);

		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
		blk_finish_plug(&plug);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = root_log_ctx.log_ret;
		goto out;
	}

	index2 = root_log_ctx.log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		blk_finish_plug(&plug);
		ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
						mark);
		btrfs_wait_logged_extents(trans, log, log_transid);
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		if (!ret)
			ret = root_log_ctx.log_ret;
		goto out;
	}
	ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(log_root_tree,
				root_log_ctx.log_transid - 1);
	}

	wait_for_writer(log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
		blk_finish_plug(&plug);
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_marked_extents(log_root_tree,
					 &log_root_tree->dirty_log_pages,
					 EXTENT_DIRTY | EXTENT_NEW);
	blk_finish_plug(&plug);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	if (!ret)
		ret = btrfs_wait_marked_extents(log_root_tree,
						&log_root_tree->dirty_log_pages,
						EXTENT_NEW | EXTENT_DIRTY);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_logged_extents(trans, log, log_transid);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	if (ret) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	/*
	 * We needn't get log_mutex here because we are sure all
	 * the other tasks are blocked.
	 */
	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_transid_committed++;
	atomic_set(&log_root_tree->log_commit[index2], 0);
	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	btrfs_remove_all_log_ctxs(root, index1, ret);

	mutex_lock(&root->log_mutex);
	root->log_transid_committed++;
	atomic_set(&root->log_commit[index1], 0);
	mutex_unlock(&root->log_mutex);

	/*
	 * The barrier before waitqueue_active is implied by mutex_unlock
	 */
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
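
/*
 * Illustrative caller sketch, not part of the original file: condensed
 * from the tail of the fsync path (btrfs_sync_file() in file.c).  The
 * inode is assumed to have already been logged with 'ctx' attached.  Any
 * non-zero return from btrfs_sync_log(), including -EAGAIN, means the log
 * alone cannot make the fsync durable and we must fall back to committing
 * the whole transaction.
 */
static int __maybe_unused fsync_finish_sketch(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_log_ctx *ctx)
{
	int ret = btrfs_sync_log(trans, root, ctx);

	if (ret == 0)
		return btrfs_end_transaction(trans, root);

	/* edge cases (past unlinks/renames, ENOSPC, ...) force a full commit */
	return btrfs_commit_transaction(trans, root);
}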
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	/* I don't think this can happen but just in case */
	if (ret)
		btrfs_abort_transaction(trans, log, ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}
/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;
	u64 dir_ino = btrfs_ino(dir);

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock;
	}

	di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}
	btrfs_release_path(path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		if (ret) {
			err = ret;
			goto fail;
		}
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(path);
	}
fail:
	btrfs_free_path(path);
out_unlock:
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0)
		btrfs_abort_transaction(trans, root, ret);

	btrfs_end_log_trans(root);

	return err;
}
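
/*
 * Illustrative example, not part of the original file (hypothetical
 * name): unlinking "foo" from a logged directory removes both the
 * DIR_ITEM and the DIR_INDEX copy of the entry above, so bytes_del ends
 * up 2 * name_len == 6 and the directory's logged i_size shrinks by the
 * same amount, mirroring how directory i_size is accounted when entries
 * are inserted (name_len counted once per index).
 */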
/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	if (ret == -ENOSPC) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 0;
	} else if (ret < 0 && ret != -ENOENT)
		btrfs_abort_transaction(trans, root, ret);
	btrfs_end_log_trans(root);

	return ret;
}
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
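
/*
 * Illustrative example, not part of the original file (hypothetical
 * offsets): after logging dir index items 10..25 of a directory, a
 * (dirid, BTRFS_DIR_LOG_INDEX_KEY, 10) item with dir_log_end == 25 tells
 * replay that the log is authoritative for exactly that slice of the key
 * space: an index in [10, 25] found in the subvolume but missing from the
 * log must have been deleted, while indexes outside the range are left
 * alone.
 *
 *	ret = insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY,
 *				 dirid, 10, 25);
 */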
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  struct btrfs_log_ctx *ctx,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;
	u64 ino = btrfs_ino(inode);

	log = root->log_root;

	min_key.objectid = ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	ret = btrfs_search_forward(root, &min_key, path, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
		min_key.objectid = ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (WARN_ON(ret != 0))
		goto done;

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;

			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}

			/*
			 * We must make sure that when we log a directory entry,
			 * the corresponding inode, after log replay, has a
			 * matching link count. For example:
			 *
			 * touch foo
			 * mkdir mydir
			 * sync
			 * ln foo mydir/bar
			 * xfs_io -c "fsync" mydir
			 * <crash>
			 * <mount fs and log replay>
			 *
			 * Would result in an fsync log that, when replayed,
			 * leaves our file inode with a link count of 1, yet
			 * two directory entries pointing to the same inode.
			 * After removing one of the names, it would not be
			 * possible to remove the other name, which always
			 * resulted in stale file handle errors, and it would
			 * not be possible to rmdir the parent directory, since
			 * its i_size could never decrement to the value
			 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
			 */
			di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
			btrfs_dir_item_key_to_cpu(src, di, &tmp);
			if (ctx &&
			    (btrfs_dir_transid(src, di) == trans->transid ||
			     btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
			    tmp.type != BTRFS_ROOT_ITEM_KEY)
				ctx->log_new_dentries = true;
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes.  We find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path,
			  struct btrfs_log_ctx *ctx)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, ctx, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}
/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int start_slot;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0); /* Logic error */
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		found_key.offset = 0;
		found_key.type = 0;
		ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
				       &start_slot);

		ret = btrfs_del_items(trans, log, path, start_slot,
				      path->slots[0] - start_slot + 1);
		/*
		 * If start slot isn't 0 then we don't need to re-search, we've
		 * found the last guy with the objectid in this tree.
		 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only,
			    u64 logged_isize)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recover code
		 * can tell the difference between a logging done
		 * just to say 'this inode exists' and a logging
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	int ret;

	ret = btrfs_insert_empty_item(trans, log, path,
				      &BTRFS_I(inode)->location,
				      sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
	btrfs_release_path(path);
	return 0;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct inode *inode,
			       struct btrfs_path *dst_path,
			       struct btrfs_path *src_path, u64 *last_extent,
			       int start_slot, int nr, int inode_only,
			       u64 logged_isize)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *src = src_path->nodes[0];
	struct btrfs_key first_key, last_key, key;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;
	int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	bool has_extents = false;
	bool need_find_last_extent = true;
	bool done = false;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	if (!ins_data)
		return -ENOMEM;

	first_key.objectid = (u64)-1;

	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		if ((i == (nr - 1)))
			last_key = ins_keys[i];

		if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			fill_inode_item(trans, dst_path->nodes[0], inode_item,
					inode, inode_only == LOG_INODE_EXISTS,
					logged_isize);
		} else {
			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
					   src_offset, ins_sizes[i]);
		}

		/*
		 * We set need_find_last_extent here in case we know we were
		 * processing other items and then walk into the first extent in
		 * the inode.  If we don't hit an extent then nothing changes,
		 * we'll do the last search the next time around.
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
			has_extents = true;
			if (first_key.objectid == (u64)-1)
				first_key = ins_keys[i];
		} else {
			need_find_last_extent = false;
		}

		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
		    !skip_csum) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			if (btrfs_file_extent_generation(src, extent) < trans->transid)
				continue;

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums, 0);
				if (ret) {
					btrfs_release_path(dst_path);
					kfree(ins_data);
					return ret;
				}
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	if (!has_extents)
		return ret;

	if (need_find_last_extent && *last_extent == first_key.offset) {
		/*
		 * We don't have any leaves between our current one and the one
		 * we processed before that can have file extent items for our
		 * inode (and have a generation number smaller than our current
		 * transaction id).
		 */
		need_find_last_extent = false;
	}

	/*
	 * Because we use btrfs_search_forward we could skip leaves that were
	 * not modified and then assume *last_extent is valid when it really
	 * isn't. So back up to the previous leaf and read the end of the last
	 * extent before we go and fill in holes.
	 */
	if (need_find_last_extent) {
		u64 len;

		ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
		if (ret < 0)
			return ret;
		if (ret)
			goto fill_holes;
		if (src_path->slots[0])
			src_path->slots[0]--;
		src = src_path->nodes[0];
		btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			goto fill_holes;
		extent = btrfs_item_ptr(src, src_path->slots[0],
					struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src,
							   src_path->slots[0],
							   extent);
			*last_extent = ALIGN(key.offset + len,
					     log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			*last_extent = key.offset + len;
		}
	}
fill_holes:
	/* So we did prev_leaf, now we need to move to the next leaf, but a few
	 * things could have happened
	 *
	 * 1) A merge could have happened, so we could currently be on a leaf
	 * that holds what we were copying in the first place.
	 * 2) A split could have happened, and now not all of the items we want
	 * are on the same leaf.
	 *
	 * So we need to adjust how we search for holes, we need to drop the
	 * path and re-search for the first extent key we found, and then walk
	 * forward until we hit the last one we copied.
	 */
	if (need_find_last_extent) {
		/* btrfs_prev_leaf could return 1 without releasing the path */
		btrfs_release_path(src_path);
		ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
					src_path, 0, 0);
		if (ret < 0)
			return ret;
		ASSERT(ret == 0);
		src = src_path->nodes[0];
		i = src_path->slots[0];
	} else {
		i = start_slot;
	}

	/*
	 * Ok so here we need to go through and fill in any holes we may have
	 * to make sure that holes are punched for those areas in case they had
	 * extents previously.
	 */
	while (!done) {
		u64 offset, len;
		u64 extent_end;

		if (i >= btrfs_header_nritems(src_path->nodes[0])) {
			ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
			if (ret < 0)
				return ret;
			ASSERT(ret == 0);
			src = src_path->nodes[0];
			i = 0;
		}

		btrfs_item_key_to_cpu(src, &key, i);
		if (!btrfs_comp_cpu_keys(&key, &last_key))
			done = true;
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY) {
			i++;
			continue;
		}
		extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(src, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(src, i, extent);
			extent_end = ALIGN(key.offset + len, log->sectorsize);
		} else {
			len = btrfs_file_extent_num_bytes(src, extent);
			extent_end = key.offset + len;
		}
		i++;

		if (*last_extent == key.offset) {
			*last_extent = extent_end;
			continue;
		}
		offset = *last_extent;
		len = key.offset - *last_extent;
		ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
					       offset, 0, 0, len, 0, len, 0,
					       0, 0);
		if (ret)
			break;
		*last_extent = extent_end;
	}
	/*
	 * Need to let the callers know we dropped the path so they should
	 * re-search.
	 */
	if (!ret && need_find_last_extent)
		ret = 1;
	return ret;
}
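
/*
 * Illustrative example, not part of the original file (hypothetical
 * offsets): if the previous extent copied by copy_items() ended at 4K
 * (*last_extent == 4K) and the next extent item starts at
 * key.offset == 16K, the loop above inserts a 12K hole (a file extent
 * item with disk_bytenr == 0) at offset 4K, so that log replay punches
 * out whatever used to live in that range.
 */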
static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}
static int wait_ordered_extents(struct btrfs_trans_handle *trans,
				struct inode *inode,
				struct btrfs_root *root,
				const struct extent_map *em,
				const struct list_head *logged_list,
				bool *ordered_io_error)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *log = root->log_root;
	u64 mod_start = em->mod_start;
	u64 mod_len = em->mod_len;
	const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
	u64 csum_offset;
	u64 csum_len;
	LIST_HEAD(ordered_sums);
	int ret = 0;

	*ordered_io_error = false;

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	/*
	 * Wait for any ordered extent that covers our extent map. If it
	 * finishes without an error, first check and see if our csums are on
	 * our outstanding ordered extents.
	 */
	list_for_each_entry(ordered, logged_list, log_list) {
		struct btrfs_ordered_sum *sum;

		if (!mod_len)
			break;

		if (ordered->file_offset + ordered->len <= mod_start ||
		    mod_start + mod_len <= ordered->file_offset)
			continue;

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			const u64 start = ordered->file_offset;
			const u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(ordered->inode != inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}

		wait_event(ordered->wait,
			   (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
			    test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));

		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
			/*
			 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
			 * i_mapping flags, so that the next fsync won't get
			 * an outdated io error too.
			 */
			btrfs_inode_check_errors(inode);
			*ordered_io_error = true;
			break;
		}
		/*
		 * We are going to copy all the csums on this ordered extent, so
		 * go ahead and adjust mod_start and mod_len in case this
		 * ordered extent has already been logged.
		 */
		if (ordered->file_offset > mod_start) {
			if (ordered->file_offset + ordered->len >=
			    mod_start + mod_len)
				mod_len = ordered->file_offset - mod_start;
			/*
			 * If we have this case
			 *
			 * |--------- logged extent ---------|
			 *       |----- ordered extent ----|
			 *
			 * Just don't mess with mod_start and mod_len, we'll
			 * just end up logging more csums than we need and it
			 * will be ok.
			 */
		} else {
			if (ordered->file_offset + ordered->len <
			    mod_start + mod_len) {
				mod_len = (mod_start + mod_len) -
					(ordered->file_offset + ordered->len);
				mod_start = ordered->file_offset +
					ordered->len;
			} else {
				mod_len = 0;
			}
		}

		if (skip_csum)
			continue;

		/*
		 * To keep us from looping for the above case of an ordered
		 * extent that falls inside of the logged extent.
		 */
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
				     &ordered->flags))
			continue;

		list_for_each_entry(sum, &ordered->list, list) {
			ret = btrfs_csum_file_blocks(trans, log, sum);
			if (ret)
				break;
		}
	}

	if (*ordered_io_error || !mod_len || ret || skip_csum)
		return ret;

	if (em->compress_type) {
		csum_offset = 0;
		csum_len = max(em->block_len, em->orig_block_len);
	} else {
		csum_offset = mod_start - em->start;
		csum_len = mod_len;
	}

	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
				       em->block_start + csum_offset,
				       em->block_start + csum_offset +
				       csum_len - 1, &ordered_sums, 0);
	if (ret)
		return ret;

	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}

	return ret;
}
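
/*
 * Illustrative example, not part of the original file (hypothetical
 * offsets): if the extent map being logged covers [0, 100K) and an
 * ordered extent whose csums were already copied covers [60K, 100K),
 * the loop above trims mod_len down to 60K, so the later csum lookup
 * only covers the part of the extent that still needs its checksums
 * copied into the log.
 */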
static int log_one_extent(struct btrfs_trans_handle *trans,
			  struct inode *inode, struct btrfs_root *root,
			  const struct extent_map *em,
			  struct btrfs_path *path,
			  const struct list_head *logged_list,
			  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;
	struct btrfs_key key;
	u64 extent_offset = em->start - em->orig_start;
	u64 block_len;
	int ret;
	int extent_inserted = 0;
	bool ordered_io_err = false;

	ret = wait_ordered_extents(trans, inode, root, em, logged_list,
				   &ordered_io_err);
	if (ret)
		return ret;

	if (ordered_io_err) {
		ctx->io_err = -EIO;
		return 0;
	}

	btrfs_init_map_token(&token);

	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
				   em->start + em->len, NULL, 0, 1,
				   sizeof(*fi), &extent_inserted);
	if (ret)
		return ret;

	if (!extent_inserted) {
		key.objectid = btrfs_ino(inode);
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = em->start;

		ret = btrfs_insert_empty_item(trans, log, path, &key,
					      sizeof(*fi));
		if (ret)
			return ret;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
					       &token);
	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_PREALLOC,
						 &token);
	else
		btrfs_set_token_file_extent_type(leaf, fi,
						 BTRFS_FILE_EXTENT_REG,
						 &token);

	block_len = max(em->block_len, em->orig_block_len);
	if (em->compress_type != BTRFS_COMPRESS_NONE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start,
							&token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
							em->block_start -
							extent_offset, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
							   &token);
	} else {
		btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
		btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
							   &token);
	}

	btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
						&token);
	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	return ret;
}
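
/*
 * Illustrative example, not part of the original file (hypothetical
 * offsets): for an extent map with orig_start == 0 and start == 8K
 * (e.g. the tail of a larger ordered extent that was split),
 * extent_offset is 8K, so the logged file extent item's disk_bytenr is
 * rewound by 8K and its offset field set to 8K, pointing the file range
 * at the right slice of the original on-disk extent.
 */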
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode,
				     struct btrfs_path *path,
				     struct list_head *logged_list,
				     struct btrfs_log_ctx *ctx)
{
	struct extent_map *em, *n;
	struct list_head extents;
	struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
	u64 test_gen;
	int ret = 0;
	int num = 0;

	INIT_LIST_HEAD(&extents);

	write_lock(&tree->lock);
	test_gen = root->fs_info->last_trans_committed;

	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
		list_del_init(&em->list);

		/*
		 * Just an arbitrary number, this can be really CPU intensive
		 * once we start getting a lot of extents, and really once we
		 * have a bunch of extents we just want to commit since it will
		 * be faster.
		 */
		if (++num > 32768) {
			list_del_init(&tree->modified_extents);
			ret = -EFBIG;
			goto process;
		}

		if (em->generation <= test_gen)
			continue;
		/* Need a ref to keep it from getting evicted from cache */
		atomic_inc(&em->refs);
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		list_add_tail(&em->list, &extents);
	}

	list_sort(NULL, &extents, extent_cmp);

process:
	while (!list_empty(&extents)) {
		em = list_entry(extents.next, struct extent_map, list);

		list_del_init(&em->list);

		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}

		write_unlock(&tree->lock);

		ret = log_one_extent(trans, inode, root, em, path, logged_list,
				     ctx);
		write_lock(&tree->lock);
		clear_em_logging(tree, em);
		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);

	btrfs_release_path(path);
	return ret;
}
static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
			     struct btrfs_path *path, u64 *size_ret)
{
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		*size_ret = 0;
	} else {
		struct btrfs_inode_item *item;

		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_item);
		*size_ret = btrfs_inode_size(path->nodes[0], item);
	}

	btrfs_release_path(path);
	return 0;
}
/*
 * At the moment we always log all xattrs. This is to figure out at log replay
 * time which xattrs must have their deletion replayed. If an xattr is missing
 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 * because if an xattr is deleted, the inode is fsynced and a power failure
 * happens, causing the log to be replayed the next time the fs is mounted,
 * we want the xattr to not exist anymore (same behaviour as other filesystems
 * with a journal, ext3/4, xfs, f2fs, etc).
 */
static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode,
				struct btrfs_path *path,
				struct btrfs_path *dst_path)
{
	int ret;
	struct btrfs_key key;
	const u64 ino = btrfs_ino(inode);
	int ins_nr = 0;
	int start_slot = 0;

	key.objectid = ino;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (true) {
		int slot = path->slots[0];
		struct extent_buffer *leaf = path->nodes[0];
		int nritems = btrfs_header_nritems(leaf);

		if (slot >= nritems) {
			if (ins_nr > 0) {
				u64 last_extent = 0;

				ret = copy_items(trans, inode, dst_path, path,
						 &last_extent, start_slot,
						 ins_nr, 1, 0);
				/* can't be 1, extent items aren't processed */
				ASSERT(ret <= 0);
				if (ret < 0)
					return ret;
				ins_nr = 0;
			}
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
			break;

		if (ins_nr == 0)
			start_slot = slot;
		ins_nr++;
		path->slots[0]++;
		cond_resched();
	}
	if (ins_nr > 0) {
		u64 last_extent = 0;

		ret = copy_items(trans, inode, dst_path, path,
				 &last_extent, start_slot,
				 ins_nr, 1, 0);
		/* can't be 1, extent items aren't processed */
		ASSERT(ret <= 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * If the no holes feature is enabled we need to make sure any hole between the
 * last extent and the i_size of our inode is explicitly marked in the log. This
 * is to make sure that doing something like:
 *
 *      1) create file with 128Kb of data
 *      2) truncate file to 64Kb
 *      3) truncate file to 256Kb
 *      4) fsync file
 *      5) <crash/power failure>
 *      6) mount fs and trigger log replay
 *
 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
 * file correspond to a hole. The presence of explicit holes in a log tree is
 * what guarantees that log replay will remove/adjust file extent items in the
 * fs/subvol tree.
 *
 * Here we do not need to care about holes between extents, that is already done
 * by copy_items(). We also only need to do this in the full sync path, where we
 * lookup for extents from the fs/subvol tree only. In the fast path case, we
 * lookup the list of modified extent maps and if any represents a hole, we
 * insert a corresponding extent representing a hole in the log tree.
 */
static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct inode *inode,
				   struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	u64 hole_start;
	u64 hole_size;
	struct extent_buffer *leaf;
	struct btrfs_root *log = root->log_root;
	const u64 ino = btrfs_ino(inode);
	const u64 i_size = i_size_read(inode);

	if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
		return 0;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ASSERT(ret > 0);

	ASSERT(path->slots[0] > 0);
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
		/* inode does not have any extents */
		hole_start = 0;
		hole_size = i_size;
	} else {
		struct btrfs_file_extent_item *extent;
		u64 len;

		/*
		 * If there's an extent beyond i_size, an explicit hole was
		 * already inserted by copy_items().
		 */
		if (key.offset >= i_size)
			return 0;

		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, extent) ==
		    BTRFS_FILE_EXTENT_INLINE) {
			len = btrfs_file_extent_inline_len(leaf,
							   path->slots[0],
							   extent);
			ASSERT(len == i_size);
			return 0;
		}

		len = btrfs_file_extent_num_bytes(leaf, extent);
		/* Last extent goes beyond i_size, no need to log a hole. */
		if (key.offset + len > i_size)
			return 0;
		hole_start = key.offset + len;
		hole_size = i_size - hole_start;
	}
	btrfs_release_path(path);

	/* Last extent ends at i_size. */
	if (hole_size == 0)
		return 0;

	hole_size = ALIGN(hole_size, root->sectorsize);
	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
				       hole_size, 0, hole_size, 0, 0, 0);
	return ret;
}
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	LIST_HEAD(logged_list);
	u64 last_extent = 0;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;
	bool fast_search = false;
	u64 ino = btrfs_ino(inode);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 logged_isize = 0;
	bool need_log_inode_item = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	dst_path = btrfs_alloc_path();
	if (!dst_path) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	min_key.objectid = ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = ino;

	/* today the code can only do partial logging of directories */
	if (S_ISDIR(inode->i_mode) ||
	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		       &BTRFS_I(inode)->runtime_flags) &&
	     inode_only == LOG_INODE_EXISTS))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	/*
	 * Only run delayed items if we are a dir or a new file.
	 * Otherwise commit the delayed inode only, which is needed in
	 * order for the log replay code to mark inodes for link count
	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
	 */
	if (S_ISDIR(inode->i_mode) ||
	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
		ret = btrfs_commit_inode_delayed_items(trans, inode);
	else
		ret = btrfs_commit_inode_delayed_inode(inode);

	if (ret) {
		btrfs_free_path(path);
		btrfs_free_path(dst_path);
		return ret;
	}

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	btrfs_get_logged_extents(inode, &logged_list, start, end);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
	} else {
		if (inode_only == LOG_INODE_EXISTS) {
			/*
			 * Make sure the new inode item we write to the log has
			 * the same isize as the current one (if it exists).
			 * This is necessary to prevent data loss after log
			 * replay, and also to prevent doing a wrong expanding
			 * truncate - for e.g. create file, write 4K into offset
			 * 0, fsync, write 4K into offset 4096, add hard link,
			 * fsync some other file (to sync log), power fail - if
			 * we use the inode's current i_size, after log replay
			 * we get an 8Kb file, with the last 4Kb extent as a
			 * hole (zeroes), as if an expanding truncate happened,
			 * instead of getting a file of 4Kb only.
			 */
			err = logged_inode_size(log, inode, path,
						&logged_isize);
			if (err)
				goto out_unlock;
		}
		if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags)) {
			if (inode_only == LOG_INODE_EXISTS) {
				max_key.type = BTRFS_XATTR_ITEM_KEY;
				ret = drop_objectid_items(trans, log, path, ino,
							  max_key.type);
			} else {
				clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					  &BTRFS_I(inode)->runtime_flags);
				clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					  &BTRFS_I(inode)->runtime_flags);
				ret = btrfs_truncate_inode_items(trans,
							 log, inode, 0, 0);
			}
		} else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
					      &BTRFS_I(inode)->runtime_flags) ||
			   inode_only == LOG_INODE_EXISTS) {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			max_key.type = BTRFS_XATTR_ITEM_KEY;
			ret = drop_objectid_items(trans, log, path, ino,
						  max_key.type);
		} else {
			if (inode_only == LOG_INODE_ALL)
				fast_search = true;
			goto log_extents;
		}
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key,
					   path, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != ino)
			break;
		if (min_key.type > max_key.type)
			break;

		if (min_key.type == BTRFS_INODE_ITEM_KEY)
			need_log_inode_item = false;

		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (ins_nr == 0)
				goto next_slot;
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
			if (ret) {
				btrfs_release_path(path);
				continue;
			}
			goto next_slot;
		}

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		if (ret) {
			ins_nr = 0;
			btrfs_release_path(path);
			continue;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, inode, dst_path, path,
					 &last_extent, ins_start_slot,
					 ins_nr, inode_only, logged_isize);
			if (ret < 0) {
				err = ret;
				goto out_unlock;
			}
			ret = 0;
			ins_nr = 0;
		}
		btrfs_release_path(path);

		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < max_key.type) {
			min_key.type++;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ins_nr) {
		ret = copy_items(trans, inode, dst_path, path, &last_extent,
				 ins_start_slot, ins_nr, inode_only,
				 logged_isize);
		if (ret < 0) {
			err = ret;
			goto out_unlock;
		}
		ret = 0;
		ins_nr = 0;
	}

	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
	if (err)
		goto out_unlock;
	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
		btrfs_release_path(path);
		btrfs_release_path(dst_path);
		err = btrfs_log_trailing_hole(trans, root, inode, path);
		if (err)
			goto out_unlock;
	}
log_extents:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);
	if (need_log_inode_item) {
		err = log_inode_item(trans, log, dst_path, inode);
		if (err)
			goto out_unlock;
	}
	if (fast_search) {
		/*
		 * Some ordered extents started by fsync might have completed
		 * before we collected the ordered extents in logged_list, which
		 * means they're gone, not in our logged_list nor in the inode's
		 * ordered tree. We want the application/user space to know an
		 * error happened while attempting to persist file data so that
		 * it can take proper action. If such error happened, we leave
		 * without writing to the log tree and the fsync must report the
		 * file data write error and not commit the current transaction.
		 */
		err = btrfs_inode_check_errors(inode);
		if (err) {
			ctx->io_err = err;
			goto out_unlock;
		}
		ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
						&logged_list, ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path,
					    ctx);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
	spin_unlock(&BTRFS_I(inode)->lock);
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}

/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
			goto out;
		inode = d_inode(parent);
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);
			ret = 1;
			break;
		}

		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = d_inode(parent);
	}
	dput(old_parent);
out:
	return ret;
}
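
/*
 * Illustrative sequence (added example):
 *
 *   mv dir/foo dir/bar
 *   xfs_io -c fsync dir/bar
 *
 * The rename records last_unlink_trans in dir (see
 * btrfs_record_unlink_dir()), so the walk above finds it and forces a full
 * transaction commit instead of a log tree commit.
 */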

struct btrfs_dir_list {
	u64 ino;
	struct list_head list;
};

/*
 * Log the inodes of the new dentries of a directory. See log_dir_items() for
 * details about why it is needed.
 * This is a recursive operation - if an existing dentry corresponds to a
 * directory, that directory's new entries are logged too (same behaviour as
 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
 * the dentries point to we do not lock their i_mutex, otherwise lockdep
 * complains about the following circular lock dependency / possible deadlock:
 *
 *           CPU0                                        CPU1
 *           ----                                        ----
 * lock(&type->i_mutex_dir_key#3/2);
 *                                            lock(sb_internal#2);
 *                                            lock(&type->i_mutex_dir_key#3/2);
 * lock(&sb->s_type->i_mutex_key#14);
 *
 * Where sb_internal is the lock (a counter that works as a lock) acquired by
 * sb_start_intwrite() in btrfs_start_transaction().
 * Not locking i_mutex of the inodes is still safe because:
 *
 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
 *    that while logging the inode new references (names) are added or removed
 *    from the inode, leaving the logged inode item with a link count that does
 *    not match the number of logged inode reference items. This is fine
 *    because at log replay time we compute the real number of links and
 *    correct the link count in the inode item (see replay_one_buffer() and
 *    link_to_fixup_dir());
 *
 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
 *    while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY
 *    and BTRFS_DIR_INDEX_KEY are added to the fs/subvol tree and the logged
 *    inode item has a size that doesn't match the sum of the lengths of all
 *    the logged names. This does not result in a problem because if a
 *    dir_item key is logged but its matching dir_index key is not logged, at
 *    log replay time we don't use it to replay the respective name (see
 *    replay_one_name()). On the other hand if only the dir_index key ends up
 *    being logged, the respective name is added to the fs/subvol tree with
 *    both the dir_item and dir_index keys created (see replay_one_name()).
 *    The directory's inode item with a wrong i_size is not a problem as well,
 *    since we don't use it at log replay time to set the i_size in the inode
 *    item of the fs/subvol tree (see overwrite_item()).
 */
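
/*
 * Illustrative case (added example):
 *
 *   mkdir A
 *   sync
 *   mkdir A/B
 *   touch A/B/file
 *   xfs_io -c fsync A
 *
 * Logging A must also log the inode behind its new dentry B, and since B is
 * a directory with a new dentry of its own, B's new entry (file) is logged
 * too, so that log replay never leaves a dentry pointing to a missing inode.
 */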
static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *start_inode,
				struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *log = root->log_root;
	struct btrfs_path *path;
	LIST_HEAD(dir_list);
	struct btrfs_dir_list *dir_elem;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
	if (!dir_elem) {
		btrfs_free_path(path);
		return -ENOMEM;
	}
	dir_elem->ino = btrfs_ino(start_inode);
	list_add_tail(&dir_elem->list, &dir_list);

	while (!list_empty(&dir_list)) {
		struct extent_buffer *leaf;
		struct btrfs_key min_key;
		int nritems;
		int i;

		dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
					    list);
		if (ret)
			goto next_dir_inode;

		min_key.objectid = dir_elem->ino;
		min_key.type = BTRFS_DIR_ITEM_KEY;
		min_key.offset = 0;
again:
		btrfs_release_path(path);
		ret = btrfs_search_forward(log, &min_key, path, trans->transid);
		if (ret < 0) {
			goto next_dir_inode;
		} else if (ret > 0) {
			ret = 0;
			goto next_dir_inode;
		}

process_leaf:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		for (i = path->slots[0]; i < nritems; i++) {
			struct btrfs_dir_item *di;
			struct btrfs_key di_key;
			struct inode *di_inode;
			struct btrfs_dir_list *new_dir_elem;
			int log_mode = LOG_INODE_EXISTS;
			int type;

			btrfs_item_key_to_cpu(leaf, &min_key, i);
			if (min_key.objectid != dir_elem->ino ||
			    min_key.type != BTRFS_DIR_ITEM_KEY)
				goto next_dir_inode;

			di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
			type = btrfs_dir_type(leaf, di);
			if (btrfs_dir_transid(leaf, di) < trans->transid &&
			    type != BTRFS_FT_DIR)
				continue;
			btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
				continue;

			di_inode = btrfs_iget(root->fs_info->sb, &di_key,
					      root, NULL);
			if (IS_ERR(di_inode)) {
				ret = PTR_ERR(di_inode);
				goto next_dir_inode;
			}

			if (btrfs_inode_in_log(di_inode, trans->transid)) {
				iput(di_inode);
				continue;
			}

			ctx->log_new_dentries = false;
			if (type == BTRFS_FT_DIR)
				log_mode = LOG_INODE_ALL;
			btrfs_release_path(path);
			ret = btrfs_log_inode(trans, root, di_inode,
					      log_mode, 0, LLONG_MAX, ctx);
			iput(di_inode);
			if (ret)
				goto next_dir_inode;
			if (ctx->log_new_dentries) {
				new_dir_elem = kmalloc(sizeof(*new_dir_elem),
						       GFP_NOFS);
				if (!new_dir_elem) {
					ret = -ENOMEM;
					goto next_dir_inode;
				}
				new_dir_elem->ino = di_key.objectid;
				list_add_tail(&new_dir_elem->list, &dir_list);
			}
			break;
		}
		if (i == nritems) {
			ret = btrfs_next_leaf(log, path);
			if (ret < 0) {
				goto next_dir_inode;
			} else if (ret > 0) {
				ret = 0;
				goto next_dir_inode;
			}
			goto process_leaf;
		}
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
			goto again;
		}
next_dir_inode:
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}
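
/*
 * Note (added): dir_list above works as a FIFO queue, so directories
 * discovered while logging are processed iteratively in breadth-first order
 * rather than by recursing on the kernel stack.
 */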

static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
									  extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
					       root, NULL);
			/* If parent inode was deleted, skip it. */
			if (IS_ERR(dir_inode))
				continue;

			ret = btrfs_log_inode(trans, root, dir_inode,
					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
			iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
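
/*
 * Illustrative scenario (added example): a file hard linked into two
 * directories, with one of the names unlinked in the current transaction,
 * has last_unlink_trans set; an fsync of the file then walks all of its
 * remaining BTRFS_INODE_[REF|EXTREF]_KEY items above and fully logs every
 * parent directory that still holds a name for it.
 */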

/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  A minimal inode and backref
 * only logging is done of any parent directories that are older than
 * the last committed transaction
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;
	bool log_dentries = false;
	struct inode *orig_inode = inode;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * If the previous transaction commit didn't complete, we have to do
	 * a full commit ourselves.
	 */
	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent
	 * directories' inodes are fully logged. This is to prevent leaving
	 * dangling directory index entries in directories that were our
	 * parents but are not anymore. Not doing this results in the old
	 * parent directory being impossible to delete after log replay
	 * (rmdir will always fail with error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode with the name
	 * foo3, but the file inode does not have a matching
	 * BTRFS_INODE_REF_KEY item and has a link count of 2.
	 */
	if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
			break;

		inode = d_inode(parent);
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS,
					      0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
				     start, end, 0, ctx);
	dput(parent);

	return ret;
}
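
/*
 * Note (added): this is the entry point used by the fsync path (see
 * btrfs_sync_file() in file.c); when it returns 1 the caller falls back to
 * a full btrfs_commit_transaction().
 */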

/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_std_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_std_error(fs_info, ret,
					"Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_std_error(fs_info, ret,
					"Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_std_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
	return ret;
}
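
/*
 * Note (added): the mount path calls this (see btrfs_replay_log() in
 * disk-io.c) when the superblock records a non-zero log tree root.
 */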

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}
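
/*
 * Illustrative sequence (added example): for "mv dir_a/foo dir_b/foo",
 * dir_a gets last_unlink_trans set above, so a later fsync of dir_a (or of
 * a file inside it) is forced into a full transaction commit and the new
 * name under dir_b is never lost.
 */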

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up to the root
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}
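
/*
 * Note (added): the rename and hard link paths (btrfs_rename() and
 * btrfs_link() in inode.c) call this once the new name is in place.
 */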