fs/btrfs/tree-log.c
1 /*
2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
23 #include "tree-log.h"
24 #include "disk-io.h"
25 #include "locking.h"
26 #include "print-tree.h"
27 #include "backref.h"
28 #include "hash.h"
29 #include "compression.h"
30 #include "qgroup.h"
32 /* magic values for the inode_only field in btrfs_log_inode:
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
36 * during log replay
38 #define LOG_INODE_ALL 0
39 #define LOG_INODE_EXISTS 1
40 #define LOG_OTHER_INODE 2
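/*
 * LOG_OTHER_INODE is used when logging an inode other than the one fsync
 * was called on (a conflicting inode found while logging); btrfs_log_inode
 * handles it like LOG_INODE_EXISTS, only with nested log_mutex locking.
 */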
43 * directory trouble cases
45 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
46 * log, we must force a full commit before doing an fsync of the directory
47 * where the unlink was done.
48 * ---> record transid of last unlink/rename per directory
50 * mkdir foo/some_dir
51 * normal commit
52 * rename foo/some_dir foo2/some_dir
53 * mkdir foo/some_dir
54 * fsync foo/some_dir/some_file
56 * The fsync above will unlink the original some_dir without recording
57 * it in its new location (foo2). After a crash, some_dir will be gone
58 * unless the fsync of some_file forces a full commit
60 * 2) we must log any new names for any file or dir that is in the fsync
61 * log. ---> check inode while renaming/linking.
63 * 2a) we must log any new names for any file or dir during rename
64 * when the directory they are being removed from was logged.
65 * ---> check inode and old parent dir during rename
67 * 2a is actually the more important variant. Without the extra logging
68 * a crash might unlink the old name without recreating the new one
70 * 3) after a crash, we must go through any directories with a link count
71 * of zero and redo the rm -rf
73 * mkdir f1/foo
74 * normal commit
75 * rm -rf f1/foo
76 * fsync(f1)
78 * The directory f1 was fully removed from the FS, but fsync was never
79 * called on f1, only its parent dir. After a crash the rm -rf must
80 * be replayed. This must be able to recurse down the entire
81 * directory tree. The inode link count fixup code takes care of the
82 * ugly details.
86 * stages for the tree walking. The first
87 * stage (0) is to only pin down the blocks we find;
88 * the second stage (1) is to make sure that all the inodes
89 * we find in the log are created in the subvolume.
91 * The last stage is to deal with directories and links and extents
92 * and all the other fun semantics
94 #define LOG_WALK_PIN_ONLY 0
95 #define LOG_WALK_REPLAY_INODES 1
96 #define LOG_WALK_REPLAY_DIR_INDEX 2
97 #define LOG_WALK_REPLAY_ALL 3
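/*
 * Dir index items get their own replay pass (LOG_WALK_REPLAY_DIR_INDEX)
 * before the remaining item types are handled in LOG_WALK_REPLAY_ALL.
 */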
99 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
100 struct btrfs_root *root, struct inode *inode,
101 int inode_only,
102 const loff_t start,
103 const loff_t end,
104 struct btrfs_log_ctx *ctx);
105 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
106 struct btrfs_root *root,
107 struct btrfs_path *path, u64 objectid);
108 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
109 struct btrfs_root *root,
110 struct btrfs_root *log,
111 struct btrfs_path *path,
112 u64 dirid, int del_all);
115 * tree logging is a special write ahead log used to make sure that
116 * fsyncs and O_SYNCs can happen without doing full tree commits.
118 * Full tree commits are expensive because they require commonly
119 * modified blocks to be recowed, creating many dirty pages in the
120 * extent tree and a 4x-6x higher write load than ext3.
122 * Instead of doing a tree commit on every fsync, we use the
123 * key ranges and transaction ids to find items for a given file or directory
124 * that have changed in this transaction. Those items are copied into
125 * a special tree (one per subvolume root), that tree is written to disk
126 * and then the fsync is considered complete.
128 * After a crash, items are copied out of the log-tree back into the
129 * subvolume tree. Any file data extents found are recorded in the extent
130 * allocation tree, and the log-tree freed.
132 * The log tree is read three times: once to pin down all the extents it is
133 * using in ram, once to create all the inodes logged in the tree
134 * and once to do all the other items.
138 * start a sub transaction and set up the log tree;
139 * this increments the log tree writer count to make the people
140 * syncing the tree wait for us to finish
142 static int start_log_trans(struct btrfs_trans_handle *trans,
143 struct btrfs_root *root,
144 struct btrfs_log_ctx *ctx)
146 int ret = 0;
148 mutex_lock(&root->log_mutex);
150 if (root->log_root) {
151 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
152 ret = -EAGAIN;
153 goto out;
156 if (!root->log_start_pid) {
157 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
158 root->log_start_pid = current->pid;
159 } else if (root->log_start_pid != current->pid) {
160 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
162 } else {
163 mutex_lock(&root->fs_info->tree_log_mutex);
164 if (!root->fs_info->log_root_tree)
165 ret = btrfs_init_log_root_tree(trans, root->fs_info);
166 mutex_unlock(&root->fs_info->tree_log_mutex);
167 if (ret)
168 goto out;
170 ret = btrfs_add_log_tree(trans, root);
171 if (ret)
172 goto out;
174 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
175 root->log_start_pid = current->pid;
178 atomic_inc(&root->log_batch);
179 atomic_inc(&root->log_writers);
180 if (ctx) {
181 int index = root->log_transid % 2;
182 list_add_tail(&ctx->list, &root->log_ctxs[index]);
183 ctx->log_transid = root->log_transid;
186 out:
187 mutex_unlock(&root->log_mutex);
188 return ret;
192 * returns 0 if there was a log transaction running and we were able
193 * to join, or returns -ENOENT if there were no transactions
194 * in progress
196 static int join_running_log_trans(struct btrfs_root *root)
198 int ret = -ENOENT;
200 smp_mb();
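/*
 * Unlocked peek at log_root: if no log transaction exists we can return
 * without taking log_mutex. The check is repeated below under the mutex
 * before the writer count is incremented.
 */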
201 if (!root->log_root)
202 return -ENOENT;
204 mutex_lock(&root->log_mutex);
205 if (root->log_root) {
206 ret = 0;
207 atomic_inc(&root->log_writers);
209 mutex_unlock(&root->log_mutex);
210 return ret;
214 * This either makes the current running log transaction wait
215 * until you call btrfs_end_log_trans() or it makes any future
216 * log transactions wait until you call btrfs_end_log_trans()
218 int btrfs_pin_log_trans(struct btrfs_root *root)
220 int ret = -ENOENT;
222 mutex_lock(&root->log_mutex);
223 atomic_inc(&root->log_writers);
224 mutex_unlock(&root->log_mutex);
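/* note: ret is not updated above, so this always returns -ENOENT */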
225 return ret;
229 * indicate we're done making changes to the log tree
230 * and wake up anyone waiting to do a sync
232 void btrfs_end_log_trans(struct btrfs_root *root)
234 if (atomic_dec_and_test(&root->log_writers)) {
236 * Implicit memory barrier after atomic_dec_and_test
238 if (waitqueue_active(&root->log_writer_wait))
239 wake_up(&root->log_writer_wait);
245 * the walk control struct is used to pass state down the chain when
246 * processing the log tree. The stage field tells us which part
247 * of the log tree processing we are currently doing. The others
248 * are state fields used for that specific part
250 struct walk_control {
251 /* should we free the extent on disk when done? This is used
252 * at transaction commit time while freeing a log tree
254 int free;
256 /* should we write out the extent buffer? This is used
257 * while flushing the log tree to disk during a sync
259 int write;
261 /* should we wait for the extent buffer io to finish? Also used
262 * while flushing the log tree to disk for a sync
264 int wait;
266 /* pin only walk, we record which extents on disk belong to the
267 * log trees
269 int pin;
271 /* what stage of the replay code we're currently in */
272 int stage;
274 /* the root we are currently replaying */
275 struct btrfs_root *replay_dest;
277 /* the trans handle for the current replay */
278 struct btrfs_trans_handle *trans;
280 /* the function that gets used to process blocks we find in the
281 * tree. Note the extent_buffer might not be up to date when it is
282 * passed in, and it must be checked or read if you need the data
283 * inside it
285 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
286 struct walk_control *wc, u64 gen);
290 * process_func used to pin down extents, write them or wait on them
292 static int process_one_buffer(struct btrfs_root *log,
293 struct extent_buffer *eb,
294 struct walk_control *wc, u64 gen)
296 int ret = 0;
299 * If this fs is mixed then we need to be able to process the leaves to
300 * pin down any logged extents, so we have to read the block.
302 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
303 ret = btrfs_read_buffer(eb, gen);
304 if (ret)
305 return ret;
308 if (wc->pin)
309 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
310 eb->start, eb->len);
312 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
313 if (wc->pin && btrfs_header_level(eb) == 0)
314 ret = btrfs_exclude_logged_extents(log, eb);
315 if (wc->write)
316 btrfs_write_tree_block(eb);
317 if (wc->wait)
318 btrfs_wait_tree_block_writeback(eb);
320 return ret;
324 * Item overwrite used by replay and tree logging. eb, slot and key all refer
325 * to the src data we are copying out.
327 * root is the tree we are copying into, and path is a scratch
328 * path for use in this function (it should be released on entry and
329 * will be released on exit).
331 * If the key is already in the destination tree the existing item is
332 * overwritten. If the existing item isn't big enough, it is extended.
333 * If it is too large, it is truncated.
335 * If the key isn't in the destination yet, a new item is inserted.
337 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
338 struct btrfs_root *root,
339 struct btrfs_path *path,
340 struct extent_buffer *eb, int slot,
341 struct btrfs_key *key)
343 int ret;
344 u32 item_size;
345 u64 saved_i_size = 0;
346 int save_old_i_size = 0;
347 unsigned long src_ptr;
348 unsigned long dst_ptr;
349 int overwrite_root = 0;
350 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
352 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
353 overwrite_root = 1;
355 item_size = btrfs_item_size_nr(eb, slot);
356 src_ptr = btrfs_item_ptr_offset(eb, slot);
358 /* look for the key in the destination tree */
359 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
360 if (ret < 0)
361 return ret;
363 if (ret == 0) {
364 char *src_copy;
365 char *dst_copy;
366 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
367 path->slots[0]);
368 if (dst_size != item_size)
369 goto insert;
371 if (item_size == 0) {
372 btrfs_release_path(path);
373 return 0;
375 dst_copy = kmalloc(item_size, GFP_NOFS);
376 src_copy = kmalloc(item_size, GFP_NOFS);
377 if (!dst_copy || !src_copy) {
378 btrfs_release_path(path);
379 kfree(dst_copy);
380 kfree(src_copy);
381 return -ENOMEM;
384 read_extent_buffer(eb, src_copy, src_ptr, item_size);
386 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
387 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
388 item_size);
389 ret = memcmp(dst_copy, src_copy, item_size);
391 kfree(dst_copy);
392 kfree(src_copy);
394 * they have the same contents, just return, this saves
395 * us from cowing blocks in the destination tree and doing
396 * extra writes that may not have been done by a previous
397 * sync
399 if (ret == 0) {
400 btrfs_release_path(path);
401 return 0;
405 * We need to load the old nbytes into the inode so when we
406 * replay the extents we've logged we get the right nbytes.
408 if (inode_item) {
409 struct btrfs_inode_item *item;
410 u64 nbytes;
411 u32 mode;
413 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
414 struct btrfs_inode_item);
415 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
416 item = btrfs_item_ptr(eb, slot,
417 struct btrfs_inode_item);
418 btrfs_set_inode_nbytes(eb, item, nbytes);
421 * If this is a directory we need to reset the i_size to
422 * 0 so that we can set it up properly when replaying
423 * the rest of the items in this log.
425 mode = btrfs_inode_mode(eb, item);
426 if (S_ISDIR(mode))
427 btrfs_set_inode_size(eb, item, 0);
429 } else if (inode_item) {
430 struct btrfs_inode_item *item;
431 u32 mode;
434 * New inode, set nbytes to 0 so that the nbytes comes out
435 * properly when we replay the extents.
437 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
438 btrfs_set_inode_nbytes(eb, item, 0);
441 * If this is a directory we need to reset the i_size to 0 so
442 * that we can set it up properly when replaying the rest of
443 * the items in this log.
445 mode = btrfs_inode_mode(eb, item);
446 if (S_ISDIR(mode))
447 btrfs_set_inode_size(eb, item, 0);
449 insert:
450 btrfs_release_path(path);
451 /* try to insert the key into the destination tree */
452 path->skip_release_on_error = 1;
453 ret = btrfs_insert_empty_item(trans, root, path,
454 key, item_size);
455 path->skip_release_on_error = 0;
457 /* make sure any existing item is the correct size */
458 if (ret == -EEXIST || ret == -EOVERFLOW) {
459 u32 found_size;
460 found_size = btrfs_item_size_nr(path->nodes[0],
461 path->slots[0]);
462 if (found_size > item_size)
463 btrfs_truncate_item(root, path, item_size, 1);
464 else if (found_size < item_size)
465 btrfs_extend_item(root, path,
466 item_size - found_size);
467 } else if (ret) {
468 return ret;
470 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
471 path->slots[0]);
473 /* don't overwrite an existing inode if the generation number
474 * was logged as zero. This is done when the tree logging code
475 * is just logging an inode to make sure it exists after recovery.
477 * Also, don't overwrite i_size on directories during replay.
478 * log replay inserts and removes directory items based on the
479 * state of the tree found in the subvolume, and i_size is modified
480 * as it goes
482 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
483 struct btrfs_inode_item *src_item;
484 struct btrfs_inode_item *dst_item;
486 src_item = (struct btrfs_inode_item *)src_ptr;
487 dst_item = (struct btrfs_inode_item *)dst_ptr;
489 if (btrfs_inode_generation(eb, src_item) == 0) {
490 struct extent_buffer *dst_eb = path->nodes[0];
491 const u64 ino_size = btrfs_inode_size(eb, src_item);
494 * For regular files an ino_size == 0 is used only when
495 * logging that an inode exists, as part of a directory
496 * fsync, and the inode wasn't fsynced before. In this
497 * case don't set the size of the inode in the fs/subvol
498 * tree, otherwise we would be throwing valid data away.
500 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
501 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
502 ino_size != 0) {
503 struct btrfs_map_token token;
505 btrfs_init_map_token(&token);
506 btrfs_set_token_inode_size(dst_eb, dst_item,
507 ino_size, &token);
509 goto no_copy;
512 if (overwrite_root &&
513 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
514 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
515 save_old_i_size = 1;
516 saved_i_size = btrfs_inode_size(path->nodes[0],
517 dst_item);
521 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
522 src_ptr, item_size);
524 if (save_old_i_size) {
525 struct btrfs_inode_item *dst_item;
526 dst_item = (struct btrfs_inode_item *)dst_ptr;
527 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
530 /* make sure the generation is filled in */
531 if (key->type == BTRFS_INODE_ITEM_KEY) {
532 struct btrfs_inode_item *dst_item;
533 dst_item = (struct btrfs_inode_item *)dst_ptr;
534 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
535 btrfs_set_inode_generation(path->nodes[0], dst_item,
536 trans->transid);
539 no_copy:
540 btrfs_mark_buffer_dirty(path->nodes[0]);
541 btrfs_release_path(path);
542 return 0;
546 * simple helper to read an inode off the disk from a given root
547 * This can only be called for subvolume roots and not for the log
549 static noinline struct inode *read_one_inode(struct btrfs_root *root,
550 u64 objectid)
552 struct btrfs_key key;
553 struct inode *inode;
555 key.objectid = objectid;
556 key.type = BTRFS_INODE_ITEM_KEY;
557 key.offset = 0;
558 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
559 if (IS_ERR(inode)) {
560 inode = NULL;
561 } else if (is_bad_inode(inode)) {
562 iput(inode);
563 inode = NULL;
565 return inode;
568 /* replays a single extent in 'eb' at 'slot' with 'key' into the
569 * subvolume 'root'. path is released on entry and should be released
570 * on exit.
572 * extents in the log tree have not been allocated out of the extent
573 * tree yet. So, this completes the allocation, taking a reference
574 * as required if the extent already exists or creating a new extent
575 * if it isn't in the extent allocation tree yet.
577 * The extent is inserted into the file, dropping any existing extents
578 * from the file that overlap the new one.
580 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
581 struct btrfs_root *root,
582 struct btrfs_path *path,
583 struct extent_buffer *eb, int slot,
584 struct btrfs_key *key)
586 int found_type;
587 u64 extent_end;
588 u64 start = key->offset;
589 u64 nbytes = 0;
590 struct btrfs_file_extent_item *item;
591 struct inode *inode = NULL;
592 unsigned long size;
593 int ret = 0;
595 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
596 found_type = btrfs_file_extent_type(eb, item);
598 if (found_type == BTRFS_FILE_EXTENT_REG ||
599 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
600 nbytes = btrfs_file_extent_num_bytes(eb, item);
601 extent_end = start + nbytes;
604 * We don't add to the inode's nbytes if we are prealloc or a
605 * hole.
607 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
608 nbytes = 0;
609 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
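/*
 * Inline extents: nbytes is the uncompressed (ram) size and the
 * extent end is rounded up to the sector size.
 */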
610 size = btrfs_file_extent_inline_len(eb, slot, item);
611 nbytes = btrfs_file_extent_ram_bytes(eb, item);
612 extent_end = ALIGN(start + size, root->sectorsize);
613 } else {
614 ret = 0;
615 goto out;
618 inode = read_one_inode(root, key->objectid);
619 if (!inode) {
620 ret = -EIO;
621 goto out;
625 * first check to see if we already have this extent in the
626 * file. This must be done before btrfs_drop_extents runs
627 * so we don't try to drop this extent.
629 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
630 start, 0);
632 if (ret == 0 &&
633 (found_type == BTRFS_FILE_EXTENT_REG ||
634 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
635 struct btrfs_file_extent_item cmp1;
636 struct btrfs_file_extent_item cmp2;
637 struct btrfs_file_extent_item *existing;
638 struct extent_buffer *leaf;
640 leaf = path->nodes[0];
641 existing = btrfs_item_ptr(leaf, path->slots[0],
642 struct btrfs_file_extent_item);
644 read_extent_buffer(eb, &cmp1, (unsigned long)item,
645 sizeof(cmp1));
646 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
647 sizeof(cmp2));
650 * we already have a pointer to this exact extent,
651 * we don't have to do anything
653 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
654 btrfs_release_path(path);
655 goto out;
658 btrfs_release_path(path);
660 /* drop any overlapping extents */
661 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
662 if (ret)
663 goto out;
665 if (found_type == BTRFS_FILE_EXTENT_REG ||
666 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
667 u64 offset;
668 unsigned long dest_offset;
669 struct btrfs_key ins;
671 ret = btrfs_insert_empty_item(trans, root, path, key,
672 sizeof(*item));
673 if (ret)
674 goto out;
675 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
676 path->slots[0]);
677 copy_extent_buffer(path->nodes[0], eb, dest_offset,
678 (unsigned long)item, sizeof(*item));
680 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
681 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
682 ins.type = BTRFS_EXTENT_ITEM_KEY;
683 offset = key->offset - btrfs_file_extent_offset(eb, item);
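/*
 * offset is the file offset the full on-disk extent maps to
 * (key->offset minus the extent's data offset); it is the offset
 * recorded in the data back reference below.
 */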
686 * Manually record the dirty extent: here we did a shallow
687 * file extent item copy and skipped the normal backref update,
688 * modifying the extent tree all by ourselves.
689 * So we need to manually record the dirty extent for qgroup,
690 * as the owner of the file extent changed from the log tree
691 * (doesn't affect qgroup) to the fs/file tree (affects qgroup)
693 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
694 btrfs_file_extent_disk_bytenr(eb, item),
695 btrfs_file_extent_disk_num_bytes(eb, item),
696 GFP_NOFS);
697 if (ret < 0)
698 goto out;
700 if (ins.objectid > 0) {
701 u64 csum_start;
702 u64 csum_end;
703 LIST_HEAD(ordered_sums);
705 * is this extent already allocated in the extent
706 * allocation tree? If so, just add a reference
708 ret = btrfs_lookup_data_extent(root, ins.objectid,
709 ins.offset);
710 if (ret == 0) {
711 ret = btrfs_inc_extent_ref(trans, root,
712 ins.objectid, ins.offset,
713 0, root->root_key.objectid,
714 key->objectid, offset);
715 if (ret)
716 goto out;
717 } else {
719 * insert the extent pointer in the extent
720 * allocation tree
722 ret = btrfs_alloc_logged_file_extent(trans,
723 root, root->root_key.objectid,
724 key->objectid, offset, &ins);
725 if (ret)
726 goto out;
728 btrfs_release_path(path);
730 if (btrfs_file_extent_compression(eb, item)) {
731 csum_start = ins.objectid;
732 csum_end = csum_start + ins.offset;
733 } else {
734 csum_start = ins.objectid +
735 btrfs_file_extent_offset(eb, item);
736 csum_end = csum_start +
737 btrfs_file_extent_num_bytes(eb, item);
740 ret = btrfs_lookup_csums_range(root->log_root,
741 csum_start, csum_end - 1,
742 &ordered_sums, 0);
743 if (ret)
744 goto out;
746 * Now delete all existing csums in the csum root that
747 * cover our range. We do this because we can have an
748 * extent that is completely referenced by one file
749 * extent item and partially referenced by another
750 * file extent item (like after using the clone or
751 * extent_same ioctls). In this case if we end up doing
752 * the replay of the one that partially references the
753 * extent first, and we do not do the csum deletion
754 * below, we can get 2 csum items in the csum tree that
755 * overlap each other. For example, imagine our log has
756 * the two following file extent items:
758 * key (257 EXTENT_DATA 409600)
759 * extent data disk byte 12845056 nr 102400
760 * extent data offset 20480 nr 20480 ram 102400
762 * key (257 EXTENT_DATA 819200)
763 * extent data disk byte 12845056 nr 102400
764 * extent data offset 0 nr 102400 ram 102400
766 * Where the second one fully references the 100K extent
767 * that starts at disk byte 12845056, and the log tree
768 * has a single csum item that covers the entire range
769 * of the extent:
771 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
773 * After the first file extent item is replayed, the
774 * csum tree gets the following csum item:
776 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
778 * Which covers the 20K sub-range starting at offset 20K
779 * of our extent. Now when we replay the second file
780 * extent item, if we do not delete existing csum items
781 * that cover any of its blocks, we end up getting two
782 * csum items in our csum tree that overlap each other:
784 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
785 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
787 * Which is a problem, because after this anyone trying
788 * to look up the checksum of any block of our
789 * extent starting at an offset of 40K or higher, will
790 * end up looking at the second csum item only, which
791 * does not contain the checksum for any block starting
792 * at offset 40K or higher of our extent.
794 while (!list_empty(&ordered_sums)) {
795 struct btrfs_ordered_sum *sums;
796 sums = list_entry(ordered_sums.next,
797 struct btrfs_ordered_sum,
798 list);
799 if (!ret)
800 ret = btrfs_del_csums(trans,
801 root->fs_info->csum_root,
802 sums->bytenr,
803 sums->len);
804 if (!ret)
805 ret = btrfs_csum_file_blocks(trans,
806 root->fs_info->csum_root,
807 sums);
808 list_del(&sums->list);
809 kfree(sums);
811 if (ret)
812 goto out;
813 } else {
814 btrfs_release_path(path);
816 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
817 /* inline extents are easy, we just overwrite them */
818 ret = overwrite_item(trans, root, path, eb, slot, key);
819 if (ret)
820 goto out;
823 inode_add_bytes(inode, nbytes);
824 ret = btrfs_update_inode(trans, root, inode);
825 out:
826 if (inode)
827 iput(inode);
828 return ret;
832 * when cleaning up conflicts between the directory names in the
833 * subvolume, directory names in the log and directory names in the
834 * inode back references, we may have to unlink inodes from directories.
836 * This is a helper function to do the unlink of a specific directory
837 * item
839 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
840 struct btrfs_root *root,
841 struct btrfs_path *path,
842 struct inode *dir,
843 struct btrfs_dir_item *di)
845 struct inode *inode;
846 char *name;
847 int name_len;
848 struct extent_buffer *leaf;
849 struct btrfs_key location;
850 int ret;
852 leaf = path->nodes[0];
854 btrfs_dir_item_key_to_cpu(leaf, di, &location);
855 name_len = btrfs_dir_name_len(leaf, di);
856 name = kmalloc(name_len, GFP_NOFS);
857 if (!name)
858 return -ENOMEM;
860 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
861 btrfs_release_path(path);
863 inode = read_one_inode(root, location.objectid);
864 if (!inode) {
865 ret = -EIO;
866 goto out;
869 ret = link_to_fixup_dir(trans, root, path, location.objectid);
870 if (ret)
871 goto out;
873 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
874 if (ret)
875 goto out;
876 else
877 ret = btrfs_run_delayed_items(trans, root);
878 out:
879 kfree(name);
880 iput(inode);
881 return ret;
885 * helper function to see if a given name and sequence number found
886 * in an inode back reference are already in a directory and correctly
887 * point to this inode
889 static noinline int inode_in_dir(struct btrfs_root *root,
890 struct btrfs_path *path,
891 u64 dirid, u64 objectid, u64 index,
892 const char *name, int name_len)
894 struct btrfs_dir_item *di;
895 struct btrfs_key location;
896 int match = 0;
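/*
 * The name must be present both as a dir index item (looked up by its
 * sequence number) and as a dir item (looked up by name), and both must
 * point at the expected inode objectid.
 */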
898 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
899 index, name, name_len, 0);
900 if (di && !IS_ERR(di)) {
901 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
902 if (location.objectid != objectid)
903 goto out;
904 } else
905 goto out;
906 btrfs_release_path(path);
908 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
909 if (di && !IS_ERR(di)) {
910 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
911 if (location.objectid != objectid)
912 goto out;
913 } else
914 goto out;
915 match = 1;
916 out:
917 btrfs_release_path(path);
918 return match;
922 * helper function to check a log tree for a named back reference in
923 * an inode. This is used to decide if a back reference that is
924 * found in the subvolume conflicts with what we find in the log.
926 * inode backreferences may have multiple refs in a single item,
927 * during replay we process one reference at a time, and we don't
928 * want to delete valid links to a file from the subvolume if that
929 * link is also in the log.
931 static noinline int backref_in_log(struct btrfs_root *log,
932 struct btrfs_key *key,
933 u64 ref_objectid,
934 const char *name, int namelen)
936 struct btrfs_path *path;
937 struct btrfs_inode_ref *ref;
938 unsigned long ptr;
939 unsigned long ptr_end;
940 unsigned long name_ptr;
941 int found_name_len;
942 int item_size;
943 int ret;
944 int match = 0;
946 path = btrfs_alloc_path();
947 if (!path)
948 return -ENOMEM;
950 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
951 if (ret != 0)
952 goto out;
954 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
956 if (key->type == BTRFS_INODE_EXTREF_KEY) {
957 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
958 name, namelen, NULL))
959 match = 1;
961 goto out;
964 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
965 ptr_end = ptr + item_size;
966 while (ptr < ptr_end) {
967 ref = (struct btrfs_inode_ref *)ptr;
968 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
969 if (found_name_len == namelen) {
970 name_ptr = (unsigned long)(ref + 1);
971 ret = memcmp_extent_buffer(path->nodes[0], name,
972 name_ptr, namelen);
973 if (ret == 0) {
974 match = 1;
975 goto out;
978 ptr = (unsigned long)(ref + 1) + found_name_len;
980 out:
981 btrfs_free_path(path);
982 return match;
985 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
986 struct btrfs_root *root,
987 struct btrfs_path *path,
988 struct btrfs_root *log_root,
989 struct inode *dir, struct inode *inode,
990 struct extent_buffer *eb,
991 u64 inode_objectid, u64 parent_objectid,
992 u64 ref_index, char *name, int namelen,
993 int *search_done)
995 int ret;
996 char *victim_name;
997 int victim_name_len;
998 struct extent_buffer *leaf;
999 struct btrfs_dir_item *di;
1000 struct btrfs_key search_key;
1001 struct btrfs_inode_extref *extref;
1003 again:
1004 /* Search old style refs */
1005 search_key.objectid = inode_objectid;
1006 search_key.type = BTRFS_INODE_REF_KEY;
1007 search_key.offset = parent_objectid;
1008 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1009 if (ret == 0) {
1010 struct btrfs_inode_ref *victim_ref;
1011 unsigned long ptr;
1012 unsigned long ptr_end;
1014 leaf = path->nodes[0];
1016 /* are we trying to overwrite a back ref for the root directory
1017 * if so, just jump out, we're done
1019 if (search_key.objectid == search_key.offset)
1020 return 1;
1022 /* check all the names in this back reference to see
1023 * if they are in the log. if so, we allow them to stay
1024 * otherwise they must be unlinked as a conflict
1026 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1027 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1028 while (ptr < ptr_end) {
1029 victim_ref = (struct btrfs_inode_ref *)ptr;
1030 victim_name_len = btrfs_inode_ref_name_len(leaf,
1031 victim_ref);
1032 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1033 if (!victim_name)
1034 return -ENOMEM;
1036 read_extent_buffer(leaf, victim_name,
1037 (unsigned long)(victim_ref + 1),
1038 victim_name_len);
1040 if (!backref_in_log(log_root, &search_key,
1041 parent_objectid,
1042 victim_name,
1043 victim_name_len)) {
1044 inc_nlink(inode);
1045 btrfs_release_path(path);
1047 ret = btrfs_unlink_inode(trans, root, dir,
1048 inode, victim_name,
1049 victim_name_len);
1050 kfree(victim_name);
1051 if (ret)
1052 return ret;
1053 ret = btrfs_run_delayed_items(trans, root);
1054 if (ret)
1055 return ret;
1056 *search_done = 1;
1057 goto again;
1059 kfree(victim_name);
1061 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1065 * NOTE: we have searched the root tree and checked the
1066 * corresponding ref; it does not need to be checked again.
1068 *search_done = 1;
1070 btrfs_release_path(path);
1072 /* Same search but for extended refs */
1073 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1074 inode_objectid, parent_objectid, 0,
1076 if (!IS_ERR_OR_NULL(extref)) {
1077 u32 item_size;
1078 u32 cur_offset = 0;
1079 unsigned long base;
1080 struct inode *victim_parent;
1082 leaf = path->nodes[0];
1084 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1085 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1087 while (cur_offset < item_size) {
1088 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1090 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1092 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1093 goto next;
1095 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1096 if (!victim_name)
1097 return -ENOMEM;
1098 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1099 victim_name_len);
1101 search_key.objectid = inode_objectid;
1102 search_key.type = BTRFS_INODE_EXTREF_KEY;
1103 search_key.offset = btrfs_extref_hash(parent_objectid,
1104 victim_name,
1105 victim_name_len);
1106 ret = 0;
1107 if (!backref_in_log(log_root, &search_key,
1108 parent_objectid, victim_name,
1109 victim_name_len)) {
1110 ret = -ENOENT;
1111 victim_parent = read_one_inode(root,
1112 parent_objectid);
1113 if (victim_parent) {
1114 inc_nlink(inode);
1115 btrfs_release_path(path);
1117 ret = btrfs_unlink_inode(trans, root,
1118 victim_parent,
1119 inode,
1120 victim_name,
1121 victim_name_len);
1122 if (!ret)
1123 ret = btrfs_run_delayed_items(
1124 trans, root);
1126 iput(victim_parent);
1127 kfree(victim_name);
1128 if (ret)
1129 return ret;
1130 *search_done = 1;
1131 goto again;
1133 kfree(victim_name);
1134 if (ret)
1135 return ret;
1136 next:
1137 cur_offset += victim_name_len + sizeof(*extref);
1139 *search_done = 1;
1141 btrfs_release_path(path);
1143 /* look for a conflicting sequence number */
1144 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1145 ref_index, name, namelen, 0);
1146 if (di && !IS_ERR(di)) {
1147 ret = drop_one_dir_item(trans, root, path, dir, di);
1148 if (ret)
1149 return ret;
1151 btrfs_release_path(path);
1153 /* look for a conflicting name */
1154 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1155 name, namelen, 0);
1156 if (di && !IS_ERR(di)) {
1157 ret = drop_one_dir_item(trans, root, path, dir, di);
1158 if (ret)
1159 return ret;
1161 btrfs_release_path(path);
1163 return 0;
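/*
 * Pull the name, index and (for extended refs) the parent objectid out of
 * a single ref entry in a log leaf. The caller is responsible for freeing
 * the returned name.
 */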
1166 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1167 u32 *namelen, char **name, u64 *index,
1168 u64 *parent_objectid)
1170 struct btrfs_inode_extref *extref;
1172 extref = (struct btrfs_inode_extref *)ref_ptr;
1174 *namelen = btrfs_inode_extref_name_len(eb, extref);
1175 *name = kmalloc(*namelen, GFP_NOFS);
1176 if (*name == NULL)
1177 return -ENOMEM;
1179 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1180 *namelen);
1182 *index = btrfs_inode_extref_index(eb, extref);
1183 if (parent_objectid)
1184 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1186 return 0;
1189 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1190 u32 *namelen, char **name, u64 *index)
1192 struct btrfs_inode_ref *ref;
1194 ref = (struct btrfs_inode_ref *)ref_ptr;
1196 *namelen = btrfs_inode_ref_name_len(eb, ref);
1197 *name = kmalloc(*namelen, GFP_NOFS);
1198 if (*name == NULL)
1199 return -ENOMEM;
1201 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1203 *index = btrfs_inode_ref_index(eb, ref);
1205 return 0;
1209 * replay one inode back reference item found in the log tree.
1210 * eb, slot and key refer to the buffer and key found in the log tree.
1211 * root is the destination we are replaying into, and path is for temp
1212 * use by this function. (it should be released on return).
1214 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1215 struct btrfs_root *root,
1216 struct btrfs_root *log,
1217 struct btrfs_path *path,
1218 struct extent_buffer *eb, int slot,
1219 struct btrfs_key *key)
1221 struct inode *dir = NULL;
1222 struct inode *inode = NULL;
1223 unsigned long ref_ptr;
1224 unsigned long ref_end;
1225 char *name = NULL;
1226 int namelen;
1227 int ret;
1228 int search_done = 0;
1229 int log_ref_ver = 0;
1230 u64 parent_objectid;
1231 u64 inode_objectid;
1232 u64 ref_index = 0;
1233 int ref_struct_size;
1235 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1236 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1238 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1239 struct btrfs_inode_extref *r;
1241 ref_struct_size = sizeof(struct btrfs_inode_extref);
1242 log_ref_ver = 1;
1243 r = (struct btrfs_inode_extref *)ref_ptr;
1244 parent_objectid = btrfs_inode_extref_parent(eb, r);
1245 } else {
1246 ref_struct_size = sizeof(struct btrfs_inode_ref);
1247 parent_objectid = key->offset;
1249 inode_objectid = key->objectid;
1252 * it is possible that we didn't log all the parent directories
1253 * for a given inode. If we don't find the dir, just don't
1254 * copy the back ref in. The link count fixup code will take
1255 * care of the rest
1257 dir = read_one_inode(root, parent_objectid);
1258 if (!dir) {
1259 ret = -ENOENT;
1260 goto out;
1263 inode = read_one_inode(root, inode_objectid);
1264 if (!inode) {
1265 ret = -EIO;
1266 goto out;
1269 while (ref_ptr < ref_end) {
1270 if (log_ref_ver) {
1271 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1272 &ref_index, &parent_objectid);
1274 * parent object can change from one array
1275 * item to another.
1277 if (!dir)
1278 dir = read_one_inode(root, parent_objectid);
1279 if (!dir) {
1280 ret = -ENOENT;
1281 goto out;
1283 } else {
1284 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1285 &ref_index);
1287 if (ret)
1288 goto out;
1290 /* if we already have a perfect match, we're done */
1291 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1292 ref_index, name, namelen)) {
1294 * look for a conflicting back reference in the
1295 * metadata. if we find one we have to unlink that name
1296 * of the file before we add our new link. Later on, we
1297 * overwrite any existing back reference, and we don't
1298 * want to create dangling pointers in the directory.
1301 if (!search_done) {
1302 ret = __add_inode_ref(trans, root, path, log,
1303 dir, inode, eb,
1304 inode_objectid,
1305 parent_objectid,
1306 ref_index, name, namelen,
1307 &search_done);
1308 if (ret) {
1309 if (ret == 1)
1310 ret = 0;
1311 goto out;
1315 /* insert our name */
1316 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1317 0, ref_index);
1318 if (ret)
1319 goto out;
1321 btrfs_update_inode(trans, root, inode);
1324 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1325 kfree(name);
1326 name = NULL;
1327 if (log_ref_ver) {
1328 iput(dir);
1329 dir = NULL;
1333 /* finally write the back reference in the inode */
1334 ret = overwrite_item(trans, root, path, eb, slot, key);
1335 out:
1336 btrfs_release_path(path);
1337 kfree(name);
1338 iput(dir);
1339 iput(inode);
1340 return ret;
1343 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1344 struct btrfs_root *root, u64 ino)
1346 int ret;
1348 ret = btrfs_insert_orphan_item(trans, root, ino);
1349 if (ret == -EEXIST)
1350 ret = 0;
1352 return ret;
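/*
 * Count the names stored in INODE_EXTREF items for this inode. Used by the
 * link count fixup code to compute the correct nlink after replay.
 */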
1355 static int count_inode_extrefs(struct btrfs_root *root,
1356 struct inode *inode, struct btrfs_path *path)
1358 int ret = 0;
1359 int name_len;
1360 unsigned int nlink = 0;
1361 u32 item_size;
1362 u32 cur_offset = 0;
1363 u64 inode_objectid = btrfs_ino(inode);
1364 u64 offset = 0;
1365 unsigned long ptr;
1366 struct btrfs_inode_extref *extref;
1367 struct extent_buffer *leaf;
1369 while (1) {
1370 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1371 &extref, &offset);
1372 if (ret)
1373 break;
1375 leaf = path->nodes[0];
1376 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1377 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1378 cur_offset = 0;
1380 while (cur_offset < item_size) {
1381 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1382 name_len = btrfs_inode_extref_name_len(leaf, extref);
1384 nlink++;
1386 cur_offset += name_len + sizeof(*extref);
1389 offset++;
1390 btrfs_release_path(path);
1392 btrfs_release_path(path);
1394 if (ret < 0 && ret != -ENOENT)
1395 return ret;
1396 return nlink;
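/*
 * Count the names stored in old style INODE_REF items for this inode by
 * walking the ref keys backwards from offset (u64)-1.
 */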
1399 static int count_inode_refs(struct btrfs_root *root,
1400 struct inode *inode, struct btrfs_path *path)
1402 int ret;
1403 struct btrfs_key key;
1404 unsigned int nlink = 0;
1405 unsigned long ptr;
1406 unsigned long ptr_end;
1407 int name_len;
1408 u64 ino = btrfs_ino(inode);
1410 key.objectid = ino;
1411 key.type = BTRFS_INODE_REF_KEY;
1412 key.offset = (u64)-1;
1414 while (1) {
1415 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1416 if (ret < 0)
1417 break;
1418 if (ret > 0) {
1419 if (path->slots[0] == 0)
1420 break;
1421 path->slots[0]--;
1423 process_slot:
1424 btrfs_item_key_to_cpu(path->nodes[0], &key,
1425 path->slots[0]);
1426 if (key.objectid != ino ||
1427 key.type != BTRFS_INODE_REF_KEY)
1428 break;
1429 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1430 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1431 path->slots[0]);
1432 while (ptr < ptr_end) {
1433 struct btrfs_inode_ref *ref;
1435 ref = (struct btrfs_inode_ref *)ptr;
1436 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1437 ref);
1438 ptr = (unsigned long)(ref + 1) + name_len;
1439 nlink++;
1442 if (key.offset == 0)
1443 break;
1444 if (path->slots[0] > 0) {
1445 path->slots[0]--;
1446 goto process_slot;
1448 key.offset--;
1449 btrfs_release_path(path);
1451 btrfs_release_path(path);
1453 return nlink;
1457 * There are a few corners where the link count of the file can't
1458 * be properly maintained during replay. So, instead of adding
1459 * lots of complexity to the log code, we just scan the backrefs
1460 * for any file that has been through replay.
1462 * The scan will update the link count on the inode to reflect the
1463 * number of back refs found. If it goes down to zero, the iput
1464 * will free the inode.
1466 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1467 struct btrfs_root *root,
1468 struct inode *inode)
1470 struct btrfs_path *path;
1471 int ret;
1472 u64 nlink = 0;
1473 u64 ino = btrfs_ino(inode);
1475 path = btrfs_alloc_path();
1476 if (!path)
1477 return -ENOMEM;
1479 ret = count_inode_refs(root, inode, path);
1480 if (ret < 0)
1481 goto out;
1483 nlink = ret;
1485 ret = count_inode_extrefs(root, inode, path);
1486 if (ret < 0)
1487 goto out;
1489 nlink += ret;
1491 ret = 0;
1493 if (nlink != inode->i_nlink) {
1494 set_nlink(inode, nlink);
1495 btrfs_update_inode(trans, root, inode);
1497 BTRFS_I(inode)->index_cnt = (u64)-1;
1499 if (inode->i_nlink == 0) {
1500 if (S_ISDIR(inode->i_mode)) {
1501 ret = replay_dir_deletes(trans, root, NULL, path,
1502 ino, 1);
1503 if (ret)
1504 goto out;
1506 ret = insert_orphan_item(trans, root, ino);
1509 out:
1510 btrfs_free_path(path);
1511 return ret;
1514 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1515 struct btrfs_root *root,
1516 struct btrfs_path *path)
1518 int ret;
1519 struct btrfs_key key;
1520 struct inode *inode;
1522 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1523 key.type = BTRFS_ORPHAN_ITEM_KEY;
1524 key.offset = (u64)-1;
1525 while (1) {
1526 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1527 if (ret < 0)
1528 break;
1530 if (ret == 1) {
1531 if (path->slots[0] == 0)
1532 break;
1533 path->slots[0]--;
1536 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1537 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1538 key.type != BTRFS_ORPHAN_ITEM_KEY)
1539 break;
1541 ret = btrfs_del_item(trans, root, path);
1542 if (ret)
1543 goto out;
1545 btrfs_release_path(path);
1546 inode = read_one_inode(root, key.offset);
1547 if (!inode)
1548 return -EIO;
1550 ret = fixup_inode_link_count(trans, root, inode);
1551 iput(inode);
1552 if (ret)
1553 goto out;
1556 * fixup on a directory may create new entries,
1557 * make sure we always look for the highest possible
1558 * offset
1560 key.offset = (u64)-1;
1562 ret = 0;
1563 out:
1564 btrfs_release_path(path);
1565 return ret;
1570 * record a given inode in the fixup dir so we can check its link
1571 * count when replay is done. The link count is incremented here
1572 * so the inode won't go away until we check it
1574 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1575 struct btrfs_root *root,
1576 struct btrfs_path *path,
1577 u64 objectid)
1579 struct btrfs_key key;
1580 int ret = 0;
1581 struct inode *inode;
1583 inode = read_one_inode(root, objectid);
1584 if (!inode)
1585 return -EIO;
1587 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1588 key.type = BTRFS_ORPHAN_ITEM_KEY;
1589 key.offset = objectid;
1591 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1593 btrfs_release_path(path);
1594 if (ret == 0) {
1595 if (!inode->i_nlink)
1596 set_nlink(inode, 1);
1597 else
1598 inc_nlink(inode);
1599 ret = btrfs_update_inode(trans, root, inode);
1600 } else if (ret == -EEXIST) {
1601 ret = 0;
1602 } else {
1603 BUG(); /* Logic Error */
1605 iput(inode);
1607 return ret;
1611 * when replaying the log for a directory, we only insert names
1612 * for inodes that actually exist. This means an fsync on a directory
1613 * does not implicitly fsync all the new files in it
1615 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1616 struct btrfs_root *root,
1617 u64 dirid, u64 index,
1618 char *name, int name_len,
1619 struct btrfs_key *location)
1621 struct inode *inode;
1622 struct inode *dir;
1623 int ret;
1625 inode = read_one_inode(root, location->objectid);
1626 if (!inode)
1627 return -ENOENT;
1629 dir = read_one_inode(root, dirid);
1630 if (!dir) {
1631 iput(inode);
1632 return -EIO;
1635 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1637 /* FIXME, put inode into FIXUP list */
1639 iput(inode);
1640 iput(dir);
1641 return ret;
1645 * Return true if an inode reference exists in the log for the given name,
1646 * inode and parent inode.
1648 static bool name_in_log_ref(struct btrfs_root *log_root,
1649 const char *name, const int name_len,
1650 const u64 dirid, const u64 ino)
1652 struct btrfs_key search_key;
1654 search_key.objectid = ino;
1655 search_key.type = BTRFS_INODE_REF_KEY;
1656 search_key.offset = dirid;
1657 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1658 return true;
1660 search_key.type = BTRFS_INODE_EXTREF_KEY;
1661 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1662 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1663 return true;
1665 return false;
1669 * take a single entry in a log directory item and replay it into
1670 * the subvolume.
1672 * if a conflicting item exists in the subdirectory already,
1673 * the inode it points to is unlinked and put into the link count
1674 * fix up tree.
1676 * If a name from the log points to a file or directory that does
1677 * not exist in the FS, it is skipped. fsyncs on directories
1678 * do not force down inodes inside that directory, just changes to the
1679 * names or unlinks in a directory.
1681 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1682 * non-existing inode) and 1 if the name was replayed.
1684 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1685 struct btrfs_root *root,
1686 struct btrfs_path *path,
1687 struct extent_buffer *eb,
1688 struct btrfs_dir_item *di,
1689 struct btrfs_key *key)
1691 char *name;
1692 int name_len;
1693 struct btrfs_dir_item *dst_di;
1694 struct btrfs_key found_key;
1695 struct btrfs_key log_key;
1696 struct inode *dir;
1697 u8 log_type;
1698 int exists;
1699 int ret = 0;
1700 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1701 bool name_added = false;
1703 dir = read_one_inode(root, key->objectid);
1704 if (!dir)
1705 return -EIO;
1707 name_len = btrfs_dir_name_len(eb, di);
1708 name = kmalloc(name_len, GFP_NOFS);
1709 if (!name) {
1710 ret = -ENOMEM;
1711 goto out;
1714 log_type = btrfs_dir_type(eb, di);
1715 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1716 name_len);
1718 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
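/* does the inode this log entry points to exist in the subvolume? */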
1719 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1720 if (exists == 0)
1721 exists = 1;
1722 else
1723 exists = 0;
1724 btrfs_release_path(path);
1726 if (key->type == BTRFS_DIR_ITEM_KEY) {
1727 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1728 name, name_len, 1);
1729 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1730 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1731 key->objectid,
1732 key->offset, name,
1733 name_len, 1);
1734 } else {
1735 /* Corruption */
1736 ret = -EINVAL;
1737 goto out;
1739 if (IS_ERR_OR_NULL(dst_di)) {
1740 /* we need a sequence number to insert, so we only
1741 * do inserts for the BTRFS_DIR_INDEX_KEY types
1743 if (key->type != BTRFS_DIR_INDEX_KEY)
1744 goto out;
1745 goto insert;
1748 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1749 /* the existing item matches the logged item */
1750 if (found_key.objectid == log_key.objectid &&
1751 found_key.type == log_key.type &&
1752 found_key.offset == log_key.offset &&
1753 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1754 update_size = false;
1755 goto out;
1759 * don't drop the conflicting directory entry if the inode
1760 * for the new entry doesn't exist
1762 if (!exists)
1763 goto out;
1765 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1766 if (ret)
1767 goto out;
1769 if (key->type == BTRFS_DIR_INDEX_KEY)
1770 goto insert;
1771 out:
1772 btrfs_release_path(path);
1773 if (!ret && update_size) {
1774 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1775 ret = btrfs_update_inode(trans, root, dir);
1777 kfree(name);
1778 iput(dir);
1779 if (!ret && name_added)
1780 ret = 1;
1781 return ret;
1783 insert:
1784 if (name_in_log_ref(root->log_root, name, name_len,
1785 key->objectid, log_key.objectid)) {
1786 /* The dentry will be added later. */
1787 ret = 0;
1788 update_size = false;
1789 goto out;
1791 btrfs_release_path(path);
1792 ret = insert_one_name(trans, root, key->objectid, key->offset,
1793 name, name_len, &log_key);
1794 if (ret && ret != -ENOENT && ret != -EEXIST)
1795 goto out;
1796 if (!ret)
1797 name_added = true;
1798 update_size = false;
1799 ret = 0;
1800 goto out;
1804 * find all the names in a directory item and reconcile them into
1805 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1806 * one name in a directory item, but the same code gets used for
1807 * both directory index types
1809 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1810 struct btrfs_root *root,
1811 struct btrfs_path *path,
1812 struct extent_buffer *eb, int slot,
1813 struct btrfs_key *key)
1815 int ret = 0;
1816 u32 item_size = btrfs_item_size_nr(eb, slot);
1817 struct btrfs_dir_item *di;
1818 int name_len;
1819 unsigned long ptr;
1820 unsigned long ptr_end;
1821 struct btrfs_path *fixup_path = NULL;
1823 ptr = btrfs_item_ptr_offset(eb, slot);
1824 ptr_end = ptr + item_size;
1825 while (ptr < ptr_end) {
1826 di = (struct btrfs_dir_item *)ptr;
1827 if (verify_dir_item(root, eb, di))
1828 return -EIO;
1829 name_len = btrfs_dir_name_len(eb, di);
1830 ret = replay_one_name(trans, root, path, eb, di, key);
1831 if (ret < 0)
1832 break;
1833 ptr = (unsigned long)(di + 1);
1834 ptr += name_len;
1837 * If this entry refers to a non-directory (directories can not
1838 * have a link count > 1) and it was added in the transaction
1839 * that was not committed, make sure we fixup the link count of
1840 * the inode the entry points to. Otherwise something like
1841 * the following would result in a directory pointing to an
1842 * inode with a wrong link count that does not account for this dir
1843 * entry:
1845 * mkdir testdir
1846 * touch testdir/foo
1847 * touch testdir/bar
1848 * sync
1850 * ln testdir/bar testdir/bar_link
1851 * ln testdir/foo testdir/foo_link
1852 * xfs_io -c "fsync" testdir/bar
1854 * <power failure>
1856 * mount fs, log replay happens
1858 * File foo would remain with a link count of 1 when it has two
1859 * entries pointing to it in the directory testdir. This would
1860 * make it impossible to ever delete the parent directory as
1861 * it would result in stale dentries that can never be deleted.
1863 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1864 struct btrfs_key di_key;
1866 if (!fixup_path) {
1867 fixup_path = btrfs_alloc_path();
1868 if (!fixup_path) {
1869 ret = -ENOMEM;
1870 break;
1874 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1875 ret = link_to_fixup_dir(trans, root, fixup_path,
1876 di_key.objectid);
1877 if (ret)
1878 break;
1880 ret = 0;
1882 btrfs_free_path(fixup_path);
1883 return ret;
1887 * directory replay has two parts. There are the standard directory
1888 * items in the log copied from the subvolume, and range items
1889 * created in the log while the subvolume was logged.
1891 * The range items tell us which parts of the key space the log
1892 * is authoritative for. During replay, if a key in the subvolume
1893 * directory is in a logged range item, but not actually in the log
1894 * that means it was deleted from the directory before the fsync
1895 * and should be removed.
1897 static noinline int find_dir_range(struct btrfs_root *root,
1898 struct btrfs_path *path,
1899 u64 dirid, int key_type,
1900 u64 *start_ret, u64 *end_ret)
1902 struct btrfs_key key;
1903 u64 found_end;
1904 struct btrfs_dir_log_item *item;
1905 int ret;
1906 int nritems;
1908 if (*start_ret == (u64)-1)
1909 return 1;
1911 key.objectid = dirid;
1912 key.type = key_type;
1913 key.offset = *start_ret;
1915 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1916 if (ret < 0)
1917 goto out;
1918 if (ret > 0) {
1919 if (path->slots[0] == 0)
1920 goto out;
1921 path->slots[0]--;
1923 if (ret != 0)
1924 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1926 if (key.type != key_type || key.objectid != dirid) {
1927 ret = 1;
1928 goto next;
1930 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1931 struct btrfs_dir_log_item);
1932 found_end = btrfs_dir_log_end(path->nodes[0], item);
1934 if (*start_ret >= key.offset && *start_ret <= found_end) {
1935 ret = 0;
1936 *start_ret = key.offset;
1937 *end_ret = found_end;
1938 goto out;
1940 ret = 1;
1941 next:
1942 /* check the next slot in the tree to see if it is a valid item */
1943 nritems = btrfs_header_nritems(path->nodes[0]);
1944 path->slots[0]++;
1945 if (path->slots[0] >= nritems) {
1946 ret = btrfs_next_leaf(root, path);
1947 if (ret)
1948 goto out;
1951 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1953 if (key.type != key_type || key.objectid != dirid) {
1954 ret = 1;
1955 goto out;
1957 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1958 struct btrfs_dir_log_item);
1959 found_end = btrfs_dir_log_end(path->nodes[0], item);
1960 *start_ret = key.offset;
1961 *end_ret = found_end;
1962 ret = 0;
1963 out:
1964 btrfs_release_path(path);
1965 return ret;
1969 * this looks for a given directory item in the log. If the directory
1970 * item is not in the log, the item is removed and the inode it points
1971 * to is unlinked
1973 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1974 struct btrfs_root *root,
1975 struct btrfs_root *log,
1976 struct btrfs_path *path,
1977 struct btrfs_path *log_path,
1978 struct inode *dir,
1979 struct btrfs_key *dir_key)
1981 int ret;
1982 struct extent_buffer *eb;
1983 int slot;
1984 u32 item_size;
1985 struct btrfs_dir_item *di;
1986 struct btrfs_dir_item *log_di;
1987 int name_len;
1988 unsigned long ptr;
1989 unsigned long ptr_end;
1990 char *name;
1991 struct inode *inode;
1992 struct btrfs_key location;
1994 again:
1995 eb = path->nodes[0];
1996 slot = path->slots[0];
1997 item_size = btrfs_item_size_nr(eb, slot);
1998 ptr = btrfs_item_ptr_offset(eb, slot);
1999 ptr_end = ptr + item_size;
2000 while (ptr < ptr_end) {
2001 di = (struct btrfs_dir_item *)ptr;
2002 if (verify_dir_item(root, eb, di)) {
2003 ret = -EIO;
2004 goto out;
2007 name_len = btrfs_dir_name_len(eb, di);
2008 name = kmalloc(name_len, GFP_NOFS);
2009 if (!name) {
2010 ret = -ENOMEM;
2011 goto out;
2013 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2014 name_len);
2015 log_di = NULL;
2016 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2017 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2018 dir_key->objectid,
2019 name, name_len, 0);
2020 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2021 log_di = btrfs_lookup_dir_index_item(trans, log,
2022 log_path,
2023 dir_key->objectid,
2024 dir_key->offset,
2025 name, name_len, 0);
2027 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2028 btrfs_dir_item_key_to_cpu(eb, di, &location);
2029 btrfs_release_path(path);
2030 btrfs_release_path(log_path);
2031 inode = read_one_inode(root, location.objectid);
2032 if (!inode) {
2033 kfree(name);
2034 return -EIO;
2037 ret = link_to_fixup_dir(trans, root,
2038 path, location.objectid);
2039 if (ret) {
2040 kfree(name);
2041 iput(inode);
2042 goto out;
2045 inc_nlink(inode);
2046 ret = btrfs_unlink_inode(trans, root, dir, inode,
2047 name, name_len);
2048 if (!ret)
2049 ret = btrfs_run_delayed_items(trans, root);
2050 kfree(name);
2051 iput(inode);
2052 if (ret)
2053 goto out;
2055 /* there might still be more names under this key
2056 * check and repeat if required
2058 ret = btrfs_search_slot(NULL, root, dir_key, path,
2059 0, 0);
2060 if (ret == 0)
2061 goto again;
2062 ret = 0;
2063 goto out;
2064 } else if (IS_ERR(log_di)) {
2065 kfree(name);
2066 return PTR_ERR(log_di);
2068 btrfs_release_path(log_path);
2069 kfree(name);
2071 ptr = (unsigned long)(di + 1);
2072 ptr += name_len;
2074 ret = 0;
2075 out:
2076 btrfs_release_path(path);
2077 btrfs_release_path(log_path);
2078 return ret;
2081 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2082 struct btrfs_root *root,
2083 struct btrfs_root *log,
2084 struct btrfs_path *path,
2085 const u64 ino)
2087 struct btrfs_key search_key;
2088 struct btrfs_path *log_path;
2089 int i;
2090 int nritems;
2091 int ret;
2093 log_path = btrfs_alloc_path();
2094 if (!log_path)
2095 return -ENOMEM;
2097 search_key.objectid = ino;
2098 search_key.type = BTRFS_XATTR_ITEM_KEY;
2099 search_key.offset = 0;
2100 again:
2101 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2102 if (ret < 0)
2103 goto out;
2104 process_leaf:
2105 nritems = btrfs_header_nritems(path->nodes[0]);
2106 for (i = path->slots[0]; i < nritems; i++) {
2107 struct btrfs_key key;
2108 struct btrfs_dir_item *di;
2109 struct btrfs_dir_item *log_di;
2110 u32 total_size;
2111 u32 cur;
2113 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2114 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2115 ret = 0;
2116 goto out;
2119 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2120 total_size = btrfs_item_size_nr(path->nodes[0], i);
2121 cur = 0;
2122 while (cur < total_size) {
2123 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2124 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2125 u32 this_len = sizeof(*di) + name_len + data_len;
2126 char *name;
2128 name = kmalloc(name_len, GFP_NOFS);
2129 if (!name) {
2130 ret = -ENOMEM;
2131 goto out;
2133 read_extent_buffer(path->nodes[0], name,
2134 (unsigned long)(di + 1), name_len);
2136 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2137 name, name_len, 0);
2138 btrfs_release_path(log_path);
2139 if (!log_di) {
2140 /* Doesn't exist in log tree, so delete it. */
2141 btrfs_release_path(path);
2142 di = btrfs_lookup_xattr(trans, root, path, ino,
2143 name, name_len, -1);
2144 kfree(name);
2145 if (IS_ERR(di)) {
2146 ret = PTR_ERR(di);
2147 goto out;
2149 ASSERT(di);
2150 ret = btrfs_delete_one_dir_name(trans, root,
2151 path, di);
2152 if (ret)
2153 goto out;
2154 btrfs_release_path(path);
2155 search_key = key;
2156 goto again;
2158 kfree(name);
2159 if (IS_ERR(log_di)) {
2160 ret = PTR_ERR(log_di);
2161 goto out;
2163 cur += this_len;
2164 di = (struct btrfs_dir_item *)((char *)di + this_len);
2167 ret = btrfs_next_leaf(root, path);
2168 if (ret > 0)
2169 ret = 0;
2170 else if (ret == 0)
2171 goto process_leaf;
2172 out:
2173 btrfs_free_path(log_path);
2174 btrfs_release_path(path);
2175 return ret;
2180 * deletion replay happens before we copy any new directory items
2181 * out of the log or out of backreferences from inodes. It
2182 * scans the log to find ranges of keys that the log is authoritative for,
2183 * and then scans the directory to find items in those ranges that are
2184 * not present in the log.
2186 * Anything we don't find in the log is unlinked and removed from the
2187 * directory.
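
For orientation, here is a hedged sketch of the range walk this comment describes. It is a simplified fragment, not the real loop below (which also handles the del_all case and repeats the whole scan for DIR_INDEX keys):

	/*
	 * Sketch only: walk the intervals of the DIR_ITEM key space that the
	 * log claims to be authoritative for. Any dir item found in the
	 * subvolume inside such an interval but missing from the log is
	 * unlinked by check_item_in_log().
	 */
	u64 start = 0, end = 0;

	while (find_dir_range(log, path, dirid, BTRFS_DIR_LOG_ITEM_KEY,
			      &start, &end) == 0) {
		/* ... scan subvolume dir items with offsets in [start, end] ... */
		if (end == (u64)-1)
			break;
		start = end + 1;
	}
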
2189 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2190 struct btrfs_root *root,
2191 struct btrfs_root *log,
2192 struct btrfs_path *path,
2193 u64 dirid, int del_all)
2195 u64 range_start;
2196 u64 range_end;
2197 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2198 int ret = 0;
2199 struct btrfs_key dir_key;
2200 struct btrfs_key found_key;
2201 struct btrfs_path *log_path;
2202 struct inode *dir;
2204 dir_key.objectid = dirid;
2205 dir_key.type = BTRFS_DIR_ITEM_KEY;
2206 log_path = btrfs_alloc_path();
2207 if (!log_path)
2208 return -ENOMEM;
2210 dir = read_one_inode(root, dirid);
2211 /* it isn't an error if the inode isn't there, that can happen
2212 * because we replay the deletes before we copy in the inode item
2213 * from the log
2215 if (!dir) {
2216 btrfs_free_path(log_path);
2217 return 0;
2219 again:
2220 range_start = 0;
2221 range_end = 0;
2222 while (1) {
2223 if (del_all)
2224 range_end = (u64)-1;
2225 else {
2226 ret = find_dir_range(log, path, dirid, key_type,
2227 &range_start, &range_end);
2228 if (ret != 0)
2229 break;
2232 dir_key.offset = range_start;
2233 while (1) {
2234 int nritems;
2235 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2236 0, 0);
2237 if (ret < 0)
2238 goto out;
2240 nritems = btrfs_header_nritems(path->nodes[0]);
2241 if (path->slots[0] >= nritems) {
2242 ret = btrfs_next_leaf(root, path);
2243 if (ret)
2244 break;
2246 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2247 path->slots[0]);
2248 if (found_key.objectid != dirid ||
2249 found_key.type != dir_key.type)
2250 goto next_type;
2252 if (found_key.offset > range_end)
2253 break;
2255 ret = check_item_in_log(trans, root, log, path,
2256 log_path, dir,
2257 &found_key);
2258 if (ret)
2259 goto out;
2260 if (found_key.offset == (u64)-1)
2261 break;
2262 dir_key.offset = found_key.offset + 1;
2264 btrfs_release_path(path);
2265 if (range_end == (u64)-1)
2266 break;
2267 range_start = range_end + 1;
2270 next_type:
2271 ret = 0;
2272 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2273 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2274 dir_key.type = BTRFS_DIR_INDEX_KEY;
2275 btrfs_release_path(path);
2276 goto again;
2278 out:
2279 btrfs_release_path(path);
2280 btrfs_free_path(log_path);
2281 iput(dir);
2282 return ret;
2286 * the process_func used to replay items from the log tree. This
2287 * gets called in two different stages. The first stage just looks
2288 * for inodes and makes sure they are all copied into the subvolume.
2290 * The second stage copies all the other item types from the log into
2291 * the subvolume. The two stage approach is slower, but gets rid of
2292 * lots of complexity around inodes referencing other inodes that exist
2293 * only in the log (references come from either directory items or inode
2294 * back refs).
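
As a quick reference, a condensed summary of the dispatch implemented below (editorial orientation only, paraphrasing the function body):

	/*
	 * Condensed summary, for orientation only:
	 *   LOG_WALK_REPLAY_INODES    - BTRFS_INODE_ITEM_KEY (plus xattr
	 *                               deletion replay and, for directories,
	 *                               dir deletion replay)
	 *   LOG_WALK_REPLAY_DIR_INDEX - BTRFS_DIR_INDEX_KEY
	 *   LOG_WALK_REPLAY_ALL       - xattrs, inode refs/extrefs, file
	 *                               extents and dir items
	 */
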
2296 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2297 struct walk_control *wc, u64 gen)
2299 int nritems;
2300 struct btrfs_path *path;
2301 struct btrfs_root *root = wc->replay_dest;
2302 struct btrfs_key key;
2303 int level;
2304 int i;
2305 int ret;
2307 ret = btrfs_read_buffer(eb, gen);
2308 if (ret)
2309 return ret;
2311 level = btrfs_header_level(eb);
2313 if (level != 0)
2314 return 0;
2316 path = btrfs_alloc_path();
2317 if (!path)
2318 return -ENOMEM;
2320 nritems = btrfs_header_nritems(eb);
2321 for (i = 0; i < nritems; i++) {
2322 btrfs_item_key_to_cpu(eb, &key, i);
2324 /* inode keys are done during the first stage */
2325 if (key.type == BTRFS_INODE_ITEM_KEY &&
2326 wc->stage == LOG_WALK_REPLAY_INODES) {
2327 struct btrfs_inode_item *inode_item;
2328 u32 mode;
2330 inode_item = btrfs_item_ptr(eb, i,
2331 struct btrfs_inode_item);
2332 ret = replay_xattr_deletes(wc->trans, root, log,
2333 path, key.objectid);
2334 if (ret)
2335 break;
2336 mode = btrfs_inode_mode(eb, inode_item);
2337 if (S_ISDIR(mode)) {
2338 ret = replay_dir_deletes(wc->trans,
2339 root, log, path, key.objectid, 0);
2340 if (ret)
2341 break;
2343 ret = overwrite_item(wc->trans, root, path,
2344 eb, i, &key);
2345 if (ret)
2346 break;
2348 /* for regular files, make sure the corresponding
2349 * orphan item exists. Extents past the new EOF
2350 * will be truncated later by orphan cleanup.
2352 if (S_ISREG(mode)) {
2353 ret = insert_orphan_item(wc->trans, root,
2354 key.objectid);
2355 if (ret)
2356 break;
2359 ret = link_to_fixup_dir(wc->trans, root,
2360 path, key.objectid);
2361 if (ret)
2362 break;
2365 if (key.type == BTRFS_DIR_INDEX_KEY &&
2366 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2367 ret = replay_one_dir_item(wc->trans, root, path,
2368 eb, i, &key);
2369 if (ret)
2370 break;
2373 if (wc->stage < LOG_WALK_REPLAY_ALL)
2374 continue;
2376 /* these keys are simply copied */
2377 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2378 ret = overwrite_item(wc->trans, root, path,
2379 eb, i, &key);
2380 if (ret)
2381 break;
2382 } else if (key.type == BTRFS_INODE_REF_KEY ||
2383 key.type == BTRFS_INODE_EXTREF_KEY) {
2384 ret = add_inode_ref(wc->trans, root, log, path,
2385 eb, i, &key);
2386 if (ret && ret != -ENOENT)
2387 break;
2388 ret = 0;
2389 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2390 ret = replay_one_extent(wc->trans, root, path,
2391 eb, i, &key);
2392 if (ret)
2393 break;
2394 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2395 ret = replay_one_dir_item(wc->trans, root, path,
2396 eb, i, &key);
2397 if (ret)
2398 break;
2401 btrfs_free_path(path);
2402 return ret;
2405 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2406 struct btrfs_root *root,
2407 struct btrfs_path *path, int *level,
2408 struct walk_control *wc)
2410 u64 root_owner;
2411 u64 bytenr;
2412 u64 ptr_gen;
2413 struct extent_buffer *next;
2414 struct extent_buffer *cur;
2415 struct extent_buffer *parent;
2416 u32 blocksize;
2417 int ret = 0;
2419 WARN_ON(*level < 0);
2420 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2422 while (*level > 0) {
2423 WARN_ON(*level < 0);
2424 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2425 cur = path->nodes[*level];
2427 WARN_ON(btrfs_header_level(cur) != *level);
2429 if (path->slots[*level] >=
2430 btrfs_header_nritems(cur))
2431 break;
2433 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2434 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2435 blocksize = root->nodesize;
2437 parent = path->nodes[*level];
2438 root_owner = btrfs_header_owner(parent);
2440 next = btrfs_find_create_tree_block(root, bytenr);
2441 if (IS_ERR(next))
2442 return PTR_ERR(next);
2444 if (*level == 1) {
2445 ret = wc->process_func(root, next, wc, ptr_gen);
2446 if (ret) {
2447 free_extent_buffer(next);
2448 return ret;
2451 path->slots[*level]++;
2452 if (wc->free) {
2453 ret = btrfs_read_buffer(next, ptr_gen);
2454 if (ret) {
2455 free_extent_buffer(next);
2456 return ret;
2459 if (trans) {
2460 btrfs_tree_lock(next);
2461 btrfs_set_lock_blocking(next);
2462 clean_tree_block(trans, root->fs_info,
2463 next);
2464 btrfs_wait_tree_block_writeback(next);
2465 btrfs_tree_unlock(next);
2468 WARN_ON(root_owner !=
2469 BTRFS_TREE_LOG_OBJECTID);
2470 ret = btrfs_free_and_pin_reserved_extent(root,
2471 bytenr, blocksize);
2472 if (ret) {
2473 free_extent_buffer(next);
2474 return ret;
2477 free_extent_buffer(next);
2478 continue;
2480 ret = btrfs_read_buffer(next, ptr_gen);
2481 if (ret) {
2482 free_extent_buffer(next);
2483 return ret;
2486 WARN_ON(*level <= 0);
2487 if (path->nodes[*level-1])
2488 free_extent_buffer(path->nodes[*level-1]);
2489 path->nodes[*level-1] = next;
2490 *level = btrfs_header_level(next);
2491 path->slots[*level] = 0;
2492 cond_resched();
2494 WARN_ON(*level < 0);
2495 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2497 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2499 cond_resched();
2500 return 0;
2503 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2504 struct btrfs_root *root,
2505 struct btrfs_path *path, int *level,
2506 struct walk_control *wc)
2508 u64 root_owner;
2509 int i;
2510 int slot;
2511 int ret;
2513 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2514 slot = path->slots[i];
2515 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2516 path->slots[i]++;
2517 *level = i;
2518 WARN_ON(*level == 0);
2519 return 0;
2520 } else {
2521 struct extent_buffer *parent;
2522 if (path->nodes[*level] == root->node)
2523 parent = path->nodes[*level];
2524 else
2525 parent = path->nodes[*level + 1];
2527 root_owner = btrfs_header_owner(parent);
2528 ret = wc->process_func(root, path->nodes[*level], wc,
2529 btrfs_header_generation(path->nodes[*level]));
2530 if (ret)
2531 return ret;
2533 if (wc->free) {
2534 struct extent_buffer *next;
2536 next = path->nodes[*level];
2538 if (trans) {
2539 btrfs_tree_lock(next);
2540 btrfs_set_lock_blocking(next);
2541 clean_tree_block(trans, root->fs_info,
2542 next);
2543 btrfs_wait_tree_block_writeback(next);
2544 btrfs_tree_unlock(next);
2547 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2548 ret = btrfs_free_and_pin_reserved_extent(root,
2549 path->nodes[*level]->start,
2550 path->nodes[*level]->len);
2551 if (ret)
2552 return ret;
2554 free_extent_buffer(path->nodes[*level]);
2555 path->nodes[*level] = NULL;
2556 *level = i + 1;
2559 return 1;
2563 * drop the reference count on the tree rooted at 'snap'. This traverses
2564 * the tree freeing any blocks that have a ref count of zero after being
2565 * decremented.
2567 static int walk_log_tree(struct btrfs_trans_handle *trans,
2568 struct btrfs_root *log, struct walk_control *wc)
2570 int ret = 0;
2571 int wret;
2572 int level;
2573 struct btrfs_path *path;
2574 int orig_level;
2576 path = btrfs_alloc_path();
2577 if (!path)
2578 return -ENOMEM;
2580 level = btrfs_header_level(log->node);
2581 orig_level = level;
2582 path->nodes[level] = log->node;
2583 extent_buffer_get(log->node);
2584 path->slots[level] = 0;
2586 while (1) {
2587 wret = walk_down_log_tree(trans, log, path, &level, wc);
2588 if (wret > 0)
2589 break;
2590 if (wret < 0) {
2591 ret = wret;
2592 goto out;
2595 wret = walk_up_log_tree(trans, log, path, &level, wc);
2596 if (wret > 0)
2597 break;
2598 if (wret < 0) {
2599 ret = wret;
2600 goto out;
2604 /* was the root node processed? if not, catch it here */
2605 if (path->nodes[orig_level]) {
2606 ret = wc->process_func(log, path->nodes[orig_level], wc,
2607 btrfs_header_generation(path->nodes[orig_level]));
2608 if (ret)
2609 goto out;
2610 if (wc->free) {
2611 struct extent_buffer *next;
2613 next = path->nodes[orig_level];
2615 if (trans) {
2616 btrfs_tree_lock(next);
2617 btrfs_set_lock_blocking(next);
2618 clean_tree_block(trans, log->fs_info, next);
2619 btrfs_wait_tree_block_writeback(next);
2620 btrfs_tree_unlock(next);
2623 WARN_ON(log->root_key.objectid !=
2624 BTRFS_TREE_LOG_OBJECTID);
2625 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2626 next->len);
2627 if (ret)
2628 goto out;
2632 out:
2633 btrfs_free_path(path);
2634 return ret;
2638 * helper function to update the item for a given subvolume's log root
2639 * in the tree of log roots
2641 static int update_log_root(struct btrfs_trans_handle *trans,
2642 struct btrfs_root *log)
2644 int ret;
2646 if (log->log_transid == 1) {
2647 /* insert root item on the first sync */
2648 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2649 &log->root_key, &log->root_item);
2650 } else {
2651 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2652 &log->root_key, &log->root_item);
2654 return ret;
2657 static void wait_log_commit(struct btrfs_root *root, int transid)
2659 DEFINE_WAIT(wait);
2660 int index = transid % 2;
2663 * we only allow two pending log transactions at a time,
2664 * so we know that if ours is more than 2 older than the
2665 * current transaction, we're done
2667 do {
2668 prepare_to_wait(&root->log_commit_wait[index],
2669 &wait, TASK_UNINTERRUPTIBLE);
2670 mutex_unlock(&root->log_mutex);
2672 if (root->log_transid_committed < transid &&
2673 atomic_read(&root->log_commit[index]))
2674 schedule();
2676 finish_wait(&root->log_commit_wait[index], &wait);
2677 mutex_lock(&root->log_mutex);
2678 } while (root->log_transid_committed < transid &&
2679 atomic_read(&root->log_commit[index]));
2682 static void wait_for_writer(struct btrfs_root *root)
2684 DEFINE_WAIT(wait);
2686 while (atomic_read(&root->log_writers)) {
2687 prepare_to_wait(&root->log_writer_wait,
2688 &wait, TASK_UNINTERRUPTIBLE);
2689 mutex_unlock(&root->log_mutex);
2690 if (atomic_read(&root->log_writers))
2691 schedule();
2692 finish_wait(&root->log_writer_wait, &wait);
2693 mutex_lock(&root->log_mutex);
2697 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2698 struct btrfs_log_ctx *ctx)
2700 if (!ctx)
2701 return;
2703 mutex_lock(&root->log_mutex);
2704 list_del_init(&ctx->list);
2705 mutex_unlock(&root->log_mutex);
2709 * Invoked in log mutex context, or the caller must be sure there is
2710 * no other task which can access the list.
2712 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2713 int index, int error)
2715 struct btrfs_log_ctx *ctx;
2716 struct btrfs_log_ctx *safe;
2718 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2719 list_del_init(&ctx->list);
2720 ctx->log_ret = error;
2723 INIT_LIST_HEAD(&root->log_ctxs[index]);
2727 * btrfs_sync_log sends a given tree log down to the disk and
2728 * updates the super blocks to record it. When this call is done,
2729 * you know that any inodes previously logged are safely on disk only
2730 * if it returns 0.
2732 * Any other return value means you need to call btrfs_commit_transaction.
2733 * Some of the edge cases for fsyncing directories that have had unlinks
2734 * or renames done in the past mean that sometimes the only safe
2735 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2736 * that has happened.
2738 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2739 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2741 int index1;
2742 int index2;
2743 int mark;
2744 int ret;
2745 struct btrfs_root *log = root->log_root;
2746 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2747 int log_transid = 0;
2748 struct btrfs_log_ctx root_log_ctx;
2749 struct blk_plug plug;
2751 mutex_lock(&root->log_mutex);
2752 log_transid = ctx->log_transid;
2753 if (root->log_transid_committed >= log_transid) {
2754 mutex_unlock(&root->log_mutex);
2755 return ctx->log_ret;
2758 index1 = log_transid % 2;
2759 if (atomic_read(&root->log_commit[index1])) {
2760 wait_log_commit(root, log_transid);
2761 mutex_unlock(&root->log_mutex);
2762 return ctx->log_ret;
2764 ASSERT(log_transid == root->log_transid);
2765 atomic_set(&root->log_commit[index1], 1);
2767 /* wait for previous tree log sync to complete */
2768 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2769 wait_log_commit(root, log_transid - 1);
2771 while (1) {
2772 int batch = atomic_read(&root->log_batch);
2773 /* when we're on an ssd, just kick the log commit out */
2774 if (!btrfs_test_opt(root->fs_info, SSD) &&
2775 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2776 mutex_unlock(&root->log_mutex);
2777 schedule_timeout_uninterruptible(1);
2778 mutex_lock(&root->log_mutex);
2780 wait_for_writer(root);
2781 if (batch == atomic_read(&root->log_batch))
2782 break;
2785 /* bail out if we need to do a full commit */
2786 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2787 ret = -EAGAIN;
2788 btrfs_free_logged_extents(log, log_transid);
2789 mutex_unlock(&root->log_mutex);
2790 goto out;
2793 if (log_transid % 2 == 0)
2794 mark = EXTENT_DIRTY;
2795 else
2796 mark = EXTENT_NEW;
2798 /* we start IO on all the marked extents here, but we don't actually
2799 * wait for them until later.
2801 blk_start_plug(&plug);
2802 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2803 if (ret) {
2804 blk_finish_plug(&plug);
2805 btrfs_abort_transaction(trans, ret);
2806 btrfs_free_logged_extents(log, log_transid);
2807 btrfs_set_log_full_commit(root->fs_info, trans);
2808 mutex_unlock(&root->log_mutex);
2809 goto out;
2812 btrfs_set_root_node(&log->root_item, log->node);
2814 root->log_transid++;
2815 log->log_transid = root->log_transid;
2816 root->log_start_pid = 0;
2818 * IO has been started, blocks of the log tree have WRITTEN flag set
2819 * in their headers. New modifications of the log will be written to
2820 * new positions, so it's safe to allow log writers to go in.
2822 mutex_unlock(&root->log_mutex);
2824 btrfs_init_log_ctx(&root_log_ctx, NULL);
2826 mutex_lock(&log_root_tree->log_mutex);
2827 atomic_inc(&log_root_tree->log_batch);
2828 atomic_inc(&log_root_tree->log_writers);
2830 index2 = log_root_tree->log_transid % 2;
2831 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2832 root_log_ctx.log_transid = log_root_tree->log_transid;
2834 mutex_unlock(&log_root_tree->log_mutex);
2836 ret = update_log_root(trans, log);
2838 mutex_lock(&log_root_tree->log_mutex);
2839 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2841 * Implicit memory barrier after atomic_dec_and_test
2843 if (waitqueue_active(&log_root_tree->log_writer_wait))
2844 wake_up(&log_root_tree->log_writer_wait);
2847 if (ret) {
2848 if (!list_empty(&root_log_ctx.list))
2849 list_del_init(&root_log_ctx.list);
2851 blk_finish_plug(&plug);
2852 btrfs_set_log_full_commit(root->fs_info, trans);
2854 if (ret != -ENOSPC) {
2855 btrfs_abort_transaction(trans, ret);
2856 mutex_unlock(&log_root_tree->log_mutex);
2857 goto out;
2859 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2860 btrfs_free_logged_extents(log, log_transid);
2861 mutex_unlock(&log_root_tree->log_mutex);
2862 ret = -EAGAIN;
2863 goto out;
2866 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2867 blk_finish_plug(&plug);
2868 list_del_init(&root_log_ctx.list);
2869 mutex_unlock(&log_root_tree->log_mutex);
2870 ret = root_log_ctx.log_ret;
2871 goto out;
2874 index2 = root_log_ctx.log_transid % 2;
2875 if (atomic_read(&log_root_tree->log_commit[index2])) {
2876 blk_finish_plug(&plug);
2877 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2878 mark);
2879 btrfs_wait_logged_extents(trans, log, log_transid);
2880 wait_log_commit(log_root_tree,
2881 root_log_ctx.log_transid);
2882 mutex_unlock(&log_root_tree->log_mutex);
2883 if (!ret)
2884 ret = root_log_ctx.log_ret;
2885 goto out;
2887 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2888 atomic_set(&log_root_tree->log_commit[index2], 1);
2890 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2891 wait_log_commit(log_root_tree,
2892 root_log_ctx.log_transid - 1);
2895 wait_for_writer(log_root_tree);
2898 * now that we've moved on to the tree of log tree roots,
2899 * check the full commit flag again
2901 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2902 blk_finish_plug(&plug);
2903 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2904 btrfs_free_logged_extents(log, log_transid);
2905 mutex_unlock(&log_root_tree->log_mutex);
2906 ret = -EAGAIN;
2907 goto out_wake_log_root;
2910 ret = btrfs_write_marked_extents(log_root_tree,
2911 &log_root_tree->dirty_log_pages,
2912 EXTENT_DIRTY | EXTENT_NEW);
2913 blk_finish_plug(&plug);
2914 if (ret) {
2915 btrfs_set_log_full_commit(root->fs_info, trans);
2916 btrfs_abort_transaction(trans, ret);
2917 btrfs_free_logged_extents(log, log_transid);
2918 mutex_unlock(&log_root_tree->log_mutex);
2919 goto out_wake_log_root;
2921 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2922 if (!ret)
2923 ret = btrfs_wait_marked_extents(log_root_tree,
2924 &log_root_tree->dirty_log_pages,
2925 EXTENT_NEW | EXTENT_DIRTY);
2926 if (ret) {
2927 btrfs_set_log_full_commit(root->fs_info, trans);
2928 btrfs_free_logged_extents(log, log_transid);
2929 mutex_unlock(&log_root_tree->log_mutex);
2930 goto out_wake_log_root;
2932 btrfs_wait_logged_extents(trans, log, log_transid);
2934 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2935 log_root_tree->node->start);
2936 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2937 btrfs_header_level(log_root_tree->node));
2939 log_root_tree->log_transid++;
2940 mutex_unlock(&log_root_tree->log_mutex);
2943 * nobody else is going to jump in and write the ctree
2944 * super here because the log_commit atomic below is protecting
2945 * us. We must be called with a transaction handle pinning
2946 * the running transaction open, so a full commit can't hop
2947 * in and cause problems either.
2949 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2950 if (ret) {
2951 btrfs_set_log_full_commit(root->fs_info, trans);
2952 btrfs_abort_transaction(trans, ret);
2953 goto out_wake_log_root;
2956 mutex_lock(&root->log_mutex);
2957 if (root->last_log_commit < log_transid)
2958 root->last_log_commit = log_transid;
2959 mutex_unlock(&root->log_mutex);
2961 out_wake_log_root:
2962 mutex_lock(&log_root_tree->log_mutex);
2963 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2965 log_root_tree->log_transid_committed++;
2966 atomic_set(&log_root_tree->log_commit[index2], 0);
2967 mutex_unlock(&log_root_tree->log_mutex);
2970 * The barrier before waitqueue_active is implied by mutex_unlock
2972 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2973 wake_up(&log_root_tree->log_commit_wait[index2]);
2974 out:
2975 mutex_lock(&root->log_mutex);
2976 btrfs_remove_all_log_ctxs(root, index1, ret);
2977 root->log_transid_committed++;
2978 atomic_set(&root->log_commit[index1], 0);
2979 mutex_unlock(&root->log_mutex);
2982 * The barrier before waitqueue_active is implied by mutex_unlock
2984 if (waitqueue_active(&root->log_commit_wait[index1]))
2985 wake_up(&root->log_commit_wait[index1]);
2986 return ret;
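
To make the contract in the comment above btrfs_sync_log concrete, here is a hedged caller sketch. It is illustrative only: the real caller is the fsync path, and the two-argument btrfs_end_transaction()/btrfs_commit_transaction() helpers are assumed to match this kernel version.

	/*
	 * Illustrative caller, not the real btrfs_sync_file(): a return of 0
	 * means the logged inodes are safe on disk, anything else (including
	 * -EAGAIN) means we must fall back to a full transaction commit.
	 */
	static int example_sync_log_or_commit(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_log_ctx *ctx)
	{
		int ret = btrfs_sync_log(trans, root, ctx);

		if (ret == 0)
			return btrfs_end_transaction(trans, root);
		return btrfs_commit_transaction(trans, root);
	}
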
2989 static void free_log_tree(struct btrfs_trans_handle *trans,
2990 struct btrfs_root *log)
2992 int ret;
2993 u64 start;
2994 u64 end;
2995 struct walk_control wc = {
2996 .free = 1,
2997 .process_func = process_one_buffer
3000 ret = walk_log_tree(trans, log, &wc);
3001 /* I don't think this can happen but just in case */
3002 if (ret)
3003 btrfs_abort_transaction(trans, ret);
3005 while (1) {
3006 ret = find_first_extent_bit(&log->dirty_log_pages,
3007 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
3008 NULL);
3009 if (ret)
3010 break;
3012 clear_extent_bits(&log->dirty_log_pages, start, end,
3013 EXTENT_DIRTY | EXTENT_NEW);
3017 * We may have short-circuited the log tree with the full commit logic
3018 * and left ordered extents on our list, so clear these out to keep us
3019 * from leaking inodes and memory.
3021 btrfs_free_logged_extents(log, 0);
3022 btrfs_free_logged_extents(log, 1);
3024 free_extent_buffer(log->node);
3025 kfree(log);
3029 * free all the extents used by the tree log. This should be called
3030 * at commit time of the full transaction
3032 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3034 if (root->log_root) {
3035 free_log_tree(trans, root->log_root);
3036 root->log_root = NULL;
3038 return 0;
3041 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3042 struct btrfs_fs_info *fs_info)
3044 if (fs_info->log_root_tree) {
3045 free_log_tree(trans, fs_info->log_root_tree);
3046 fs_info->log_root_tree = NULL;
3048 return 0;
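
As a rough illustration of "commit time of the full transaction" in the comment above, the teardown is expected to look roughly like this from the commit path (sketch only; the real call sites live in the transaction commit code, not here):

	/* Sketch only: per-subvolume log trees are torn down while committing
	 * the fs roots, and the tree of log roots afterwards. */
	btrfs_free_log(trans, root);			/* for each dirty root */
	btrfs_free_log_root_tree(trans, root->fs_info);
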
3052 * If both a file and directory are logged, and unlinks or renames are
3053 * mixed in, we have a few interesting corners:
3055 * create file X in dir Y
3056 * link file X to X.link in dir Y
3057 * fsync file X
3058 * unlink file X but leave X.link
3059 * fsync dir Y
3061 * After a crash we would expect only X.link to exist. But file X
3062 * didn't get fsync'd again so the log has back refs for X and X.link.
3064 * We solve this by removing directory entries and inode backrefs from the
3065 * log when a file that was logged in the current transaction is
3066 * unlinked. Any later fsync will include the updated log entries, and
3067 * we'll be able to reconstruct the proper directory items from backrefs.
3069 * This optimization allows us to avoid relogging the entire inode
3070 * or the entire directory.
3072 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3073 struct btrfs_root *root,
3074 const char *name, int name_len,
3075 struct inode *dir, u64 index)
3077 struct btrfs_root *log;
3078 struct btrfs_dir_item *di;
3079 struct btrfs_path *path;
3080 int ret;
3081 int err = 0;
3082 int bytes_del = 0;
3083 u64 dir_ino = btrfs_ino(dir);
3085 if (BTRFS_I(dir)->logged_trans < trans->transid)
3086 return 0;
3088 ret = join_running_log_trans(root);
3089 if (ret)
3090 return 0;
3092 mutex_lock(&BTRFS_I(dir)->log_mutex);
3094 log = root->log_root;
3095 path = btrfs_alloc_path();
3096 if (!path) {
3097 err = -ENOMEM;
3098 goto out_unlock;
3101 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3102 name, name_len, -1);
3103 if (IS_ERR(di)) {
3104 err = PTR_ERR(di);
3105 goto fail;
3107 if (di) {
3108 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3109 bytes_del += name_len;
3110 if (ret) {
3111 err = ret;
3112 goto fail;
3115 btrfs_release_path(path);
3116 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3117 index, name, name_len, -1);
3118 if (IS_ERR(di)) {
3119 err = PTR_ERR(di);
3120 goto fail;
3122 if (di) {
3123 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3124 bytes_del += name_len;
3125 if (ret) {
3126 err = ret;
3127 goto fail;
3131 /* update the directory size in the log to reflect the names
3132 * we have removed
3134 if (bytes_del) {
3135 struct btrfs_key key;
3137 key.objectid = dir_ino;
3138 key.offset = 0;
3139 key.type = BTRFS_INODE_ITEM_KEY;
3140 btrfs_release_path(path);
3142 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3143 if (ret < 0) {
3144 err = ret;
3145 goto fail;
3147 if (ret == 0) {
3148 struct btrfs_inode_item *item;
3149 u64 i_size;
3151 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3152 struct btrfs_inode_item);
3153 i_size = btrfs_inode_size(path->nodes[0], item);
3154 if (i_size > bytes_del)
3155 i_size -= bytes_del;
3156 else
3157 i_size = 0;
3158 btrfs_set_inode_size(path->nodes[0], item, i_size);
3159 btrfs_mark_buffer_dirty(path->nodes[0]);
3160 } else
3161 ret = 0;
3162 btrfs_release_path(path);
3164 fail:
3165 btrfs_free_path(path);
3166 out_unlock:
3167 mutex_unlock(&BTRFS_I(dir)->log_mutex);
3168 if (ret == -ENOSPC) {
3169 btrfs_set_log_full_commit(root->fs_info, trans);
3170 ret = 0;
3171 } else if (ret < 0)
3172 btrfs_abort_transaction(trans, ret);
3174 btrfs_end_log_trans(root);
3176 return err;
3179 /* see comments for btrfs_del_dir_entries_in_log */
3180 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3181 struct btrfs_root *root,
3182 const char *name, int name_len,
3183 struct inode *inode, u64 dirid)
3185 struct btrfs_root *log;
3186 u64 index;
3187 int ret;
3189 if (BTRFS_I(inode)->logged_trans < trans->transid)
3190 return 0;
3192 ret = join_running_log_trans(root);
3193 if (ret)
3194 return 0;
3195 log = root->log_root;
3196 mutex_lock(&BTRFS_I(inode)->log_mutex);
3198 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3199 dirid, &index);
3200 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3201 if (ret == -ENOSPC) {
3202 btrfs_set_log_full_commit(root->fs_info, trans);
3203 ret = 0;
3204 } else if (ret < 0 && ret != -ENOENT)
3205 btrfs_abort_transaction(trans, ret);
3206 btrfs_end_log_trans(root);
3208 return ret;
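
A hedged sketch of how the two helpers above might be paired when a name is removed inside a transaction. The function and variable names here are made up for illustration; the real callers live in the unlink/rename paths outside this file.

	static int example_trim_log_on_unlink(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct inode *dir,
					      struct inode *inode,
					      const char *name, int name_len,
					      u64 dir_index)
	{
		int ret;

		/* drop the DIR_ITEM and DIR_INDEX copies from the log */
		ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
						   dir, dir_index);
		if (ret)
			return ret;
		/* and the matching inode backref */
		return btrfs_del_inode_ref_in_log(trans, root, name, name_len,
						  inode, btrfs_ino(dir));
	}
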
3212 * creates a range item in the log for 'dirid'. first_offset and
3213 * last_offset tell us which parts of the key space the log should
3214 * be considered authoritative for.
3216 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3217 struct btrfs_root *log,
3218 struct btrfs_path *path,
3219 int key_type, u64 dirid,
3220 u64 first_offset, u64 last_offset)
3222 int ret;
3223 struct btrfs_key key;
3224 struct btrfs_dir_log_item *item;
3226 key.objectid = dirid;
3227 key.offset = first_offset;
3228 if (key_type == BTRFS_DIR_ITEM_KEY)
3229 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3230 else
3231 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3232 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3233 if (ret)
3234 return ret;
3236 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3237 struct btrfs_dir_log_item);
3238 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3239 btrfs_mark_buffer_dirty(path->nodes[0]);
3240 btrfs_release_path(path);
3241 return 0;
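
For illustration, a single hypothetical call; the inode number and offsets are made up:

	/* Illustrative only: record that, for directory inode 257, the log is
	 * authoritative for dir index offsets [96, 128]. Passing
	 * BTRFS_DIR_INDEX_KEY makes insert_dir_log_key() store the range
	 * under a BTRFS_DIR_LOG_INDEX_KEY item. */
	ret = insert_dir_log_key(trans, log, path, BTRFS_DIR_INDEX_KEY,
				 257, 96, 128);
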
3245 * log all the items included in the current transaction for a given
3246 * directory. This also creates the range items in the log tree required
3247 * to replay anything deleted before the fsync
3249 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3250 struct btrfs_root *root, struct inode *inode,
3251 struct btrfs_path *path,
3252 struct btrfs_path *dst_path, int key_type,
3253 struct btrfs_log_ctx *ctx,
3254 u64 min_offset, u64 *last_offset_ret)
3256 struct btrfs_key min_key;
3257 struct btrfs_root *log = root->log_root;
3258 struct extent_buffer *src;
3259 int err = 0;
3260 int ret;
3261 int i;
3262 int nritems;
3263 u64 first_offset = min_offset;
3264 u64 last_offset = (u64)-1;
3265 u64 ino = btrfs_ino(inode);
3267 log = root->log_root;
3269 min_key.objectid = ino;
3270 min_key.type = key_type;
3271 min_key.offset = min_offset;
3273 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3276 * we didn't find anything from this transaction, see if there
3277 * is anything at all
3279 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3280 min_key.objectid = ino;
3281 min_key.type = key_type;
3282 min_key.offset = (u64)-1;
3283 btrfs_release_path(path);
3284 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3285 if (ret < 0) {
3286 btrfs_release_path(path);
3287 return ret;
3289 ret = btrfs_previous_item(root, path, ino, key_type);
3291 /* if ret == 0 there are items for this type,
3292 * create a range to tell us the last key of this type.
3293 * otherwise, there are no items in this directory after
3294 * *min_offset, and we create a range to indicate that.
3296 if (ret == 0) {
3297 struct btrfs_key tmp;
3298 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3299 path->slots[0]);
3300 if (key_type == tmp.type)
3301 first_offset = max(min_offset, tmp.offset) + 1;
3303 goto done;
3306 /* go backward to find any previous key */
3307 ret = btrfs_previous_item(root, path, ino, key_type);
3308 if (ret == 0) {
3309 struct btrfs_key tmp;
3310 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3311 if (key_type == tmp.type) {
3312 first_offset = tmp.offset;
3313 ret = overwrite_item(trans, log, dst_path,
3314 path->nodes[0], path->slots[0],
3315 &tmp);
3316 if (ret) {
3317 err = ret;
3318 goto done;
3322 btrfs_release_path(path);
3324 /* find the first key from this transaction again */
3325 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3326 if (WARN_ON(ret != 0))
3327 goto done;
3330 * we have a block from this transaction, log every item in it
3331 * from our directory
3333 while (1) {
3334 struct btrfs_key tmp;
3335 src = path->nodes[0];
3336 nritems = btrfs_header_nritems(src);
3337 for (i = path->slots[0]; i < nritems; i++) {
3338 struct btrfs_dir_item *di;
3340 btrfs_item_key_to_cpu(src, &min_key, i);
3342 if (min_key.objectid != ino || min_key.type != key_type)
3343 goto done;
3344 ret = overwrite_item(trans, log, dst_path, src, i,
3345 &min_key);
3346 if (ret) {
3347 err = ret;
3348 goto done;
3352 * We must make sure that when we log a directory entry,
3353 * the corresponding inode, after log replay, has a
3354 * matching link count. For example:
3356 * touch foo
3357 * mkdir mydir
3358 * sync
3359 * ln foo mydir/bar
3360 * xfs_io -c "fsync" mydir
3361 * <crash>
3362 * <mount fs and log replay>
3364 * This would result in an fsync log that, when replayed, leaves
3365 * our file inode with a link count of 1 but two directory
3366 * entries pointing to the same inode.
3367 * After removing one of the names, it would not be
3368 * possible to remove the other name, which always resulted
3369 * in stale file handle errors, and it would not
3370 * be possible to rmdir the parent directory, since
3371 * its i_size could never decrement to the value
3372 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3374 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3375 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3376 if (ctx &&
3377 (btrfs_dir_transid(src, di) == trans->transid ||
3378 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3379 tmp.type != BTRFS_ROOT_ITEM_KEY)
3380 ctx->log_new_dentries = true;
3382 path->slots[0] = nritems;
3385 * look ahead to the next item and see if it is also
3386 * from this directory and from this transaction
3388 ret = btrfs_next_leaf(root, path);
3389 if (ret == 1) {
3390 last_offset = (u64)-1;
3391 goto done;
3393 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3394 if (tmp.objectid != ino || tmp.type != key_type) {
3395 last_offset = (u64)-1;
3396 goto done;
3398 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3399 ret = overwrite_item(trans, log, dst_path,
3400 path->nodes[0], path->slots[0],
3401 &tmp);
3402 if (ret)
3403 err = ret;
3404 else
3405 last_offset = tmp.offset;
3406 goto done;
3409 done:
3410 btrfs_release_path(path);
3411 btrfs_release_path(dst_path);
3413 if (err == 0) {
3414 *last_offset_ret = last_offset;
3416 * insert the log range keys to indicate where the log
3417 * is valid
3419 ret = insert_dir_log_key(trans, log, path, key_type,
3420 ino, first_offset, last_offset);
3421 if (ret)
3422 err = ret;
3424 return err;
3428 * logging directories is very similar to logging inodes. We find all the items
3429 * from the current transaction and write them to the log.
3431 * The recovery code scans the directory in the subvolume, and if it finds a
3432 * key in the range logged that is not present in the log tree, then it means
3433 * that dir entry was unlinked during the transaction.
3435 * In order for that scan to work, we must include one key smaller than
3436 * the smallest logged by this transaction and one key larger than the largest
3437 * key logged by this transaction.
3439 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3440 struct btrfs_root *root, struct inode *inode,
3441 struct btrfs_path *path,
3442 struct btrfs_path *dst_path,
3443 struct btrfs_log_ctx *ctx)
3445 u64 min_key;
3446 u64 max_key;
3447 int ret;
3448 int key_type = BTRFS_DIR_ITEM_KEY;
3450 again:
3451 min_key = 0;
3452 max_key = 0;
3453 while (1) {
3454 ret = log_dir_items(trans, root, inode, path,
3455 dst_path, key_type, ctx, min_key,
3456 &max_key);
3457 if (ret)
3458 return ret;
3459 if (max_key == (u64)-1)
3460 break;
3461 min_key = max_key + 1;
3464 if (key_type == BTRFS_DIR_ITEM_KEY) {
3465 key_type = BTRFS_DIR_INDEX_KEY;
3466 goto again;
3468 return 0;
3472 * a helper function to drop items from the log before we relog an
3473 * inode. max_key_type indicates the highest item type to remove.
3474 * This cannot be run for file data extents because it does not
3475 * free the extents they point to.
3477 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3478 struct btrfs_root *log,
3479 struct btrfs_path *path,
3480 u64 objectid, int max_key_type)
3482 int ret;
3483 struct btrfs_key key;
3484 struct btrfs_key found_key;
3485 int start_slot;
3487 key.objectid = objectid;
3488 key.type = max_key_type;
3489 key.offset = (u64)-1;
3491 while (1) {
3492 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3493 BUG_ON(ret == 0); /* Logic error */
3494 if (ret < 0)
3495 break;
3497 if (path->slots[0] == 0)
3498 break;
3500 path->slots[0]--;
3501 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3502 path->slots[0]);
3504 if (found_key.objectid != objectid)
3505 break;
3507 found_key.offset = 0;
3508 found_key.type = 0;
3509 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3510 &start_slot);
3512 ret = btrfs_del_items(trans, log, path, start_slot,
3513 path->slots[0] - start_slot + 1);
3515 * If start slot isn't 0 then we don't need to re-search, we've
3516 * found the last guy with the objectid in this tree.
3518 if (ret || start_slot != 0)
3519 break;
3520 btrfs_release_path(path);
3522 btrfs_release_path(path);
3523 if (ret > 0)
3524 ret = 0;
3525 return ret;
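
For example, a relog pass that only needs the inode to exist might clear out everything up to and including xattrs first. This is a hedged sketch; the exact max_key_type chosen for each logging mode is decided by callers that are outside this excerpt.

	/* Illustrative only: drop every logged item of 'ino' whose key type is
	 * <= BTRFS_XATTR_ITEM_KEY before re-logging the inode item itself. */
	ret = drop_objectid_items(trans, log, path, ino, BTRFS_XATTR_ITEM_KEY);
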
3528 static void fill_inode_item(struct btrfs_trans_handle *trans,
3529 struct extent_buffer *leaf,
3530 struct btrfs_inode_item *item,
3531 struct inode *inode, int log_inode_only,
3532 u64 logged_isize)
3534 struct btrfs_map_token token;
3536 btrfs_init_map_token(&token);
3538 if (log_inode_only) {
3539 /* set the generation to zero so the recovery code
3540 * can tell the difference between a logging
3541 * just to say 'this inode exists' and a logging
3542 * to say 'update this inode with these values'
3544 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3545 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3546 } else {
3547 btrfs_set_token_inode_generation(leaf, item,
3548 BTRFS_I(inode)->generation,
3549 &token);
3550 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3553 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3554 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3555 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3556 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3558 btrfs_set_token_timespec_sec(leaf, &item->atime,
3559 inode->i_atime.tv_sec, &token);
3560 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3561 inode->i_atime.tv_nsec, &token);
3563 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3564 inode->i_mtime.tv_sec, &token);
3565 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3566 inode->i_mtime.tv_nsec, &token);
3568 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3569 inode->i_ctime.tv_sec, &token);
3570 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3571 inode->i_ctime.tv_nsec, &token);
3573 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3574 &token);
3576 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3577 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3578 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3579 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3580 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3583 static int log_inode_item(struct btrfs_trans_handle *trans,
3584 struct btrfs_root *log, struct btrfs_path *path,
3585 struct inode *inode)
3587 struct btrfs_inode_item *inode_item;
3588 int ret;
3590 ret = btrfs_insert_empty_item(trans, log, path,
3591 &BTRFS_I(inode)->location,
3592 sizeof(*inode_item));
3593 if (ret && ret != -EEXIST)
3594 return ret;
3595 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3596 struct btrfs_inode_item);
3597 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3598 btrfs_release_path(path);
3599 return 0;
3602 static noinline int copy_items(struct btrfs_trans_handle *trans,
3603 struct inode *inode,
3604 struct btrfs_path *dst_path,
3605 struct btrfs_path *src_path, u64 *last_extent,
3606 int start_slot, int nr, int inode_only,
3607 u64 logged_isize)
3609 unsigned long src_offset;
3610 unsigned long dst_offset;
3611 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3612 struct btrfs_file_extent_item *extent;
3613 struct btrfs_inode_item *inode_item;
3614 struct extent_buffer *src = src_path->nodes[0];
3615 struct btrfs_key first_key, last_key, key;
3616 int ret;
3617 struct btrfs_key *ins_keys;
3618 u32 *ins_sizes;
3619 char *ins_data;
3620 int i;
3621 struct list_head ordered_sums;
3622 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3623 bool has_extents = false;
3624 bool need_find_last_extent = true;
3625 bool done = false;
3627 INIT_LIST_HEAD(&ordered_sums);
3629 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3630 nr * sizeof(u32), GFP_NOFS);
3631 if (!ins_data)
3632 return -ENOMEM;
3634 first_key.objectid = (u64)-1;
3636 ins_sizes = (u32 *)ins_data;
3637 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3639 for (i = 0; i < nr; i++) {
3640 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3641 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3643 ret = btrfs_insert_empty_items(trans, log, dst_path,
3644 ins_keys, ins_sizes, nr);
3645 if (ret) {
3646 kfree(ins_data);
3647 return ret;
3650 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3651 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3652 dst_path->slots[0]);
3654 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3656 if ((i == (nr - 1)))
3657 last_key = ins_keys[i];
3659 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3660 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3661 dst_path->slots[0],
3662 struct btrfs_inode_item);
3663 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3664 inode, inode_only == LOG_INODE_EXISTS,
3665 logged_isize);
3666 } else {
3667 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3668 src_offset, ins_sizes[i]);
3672 * We set need_find_last_extent here in case we know we were
3673 * processing other items and then walk into the first extent in
3674 * the inode. If we don't hit an extent then nothing changes,
3675 * we'll do the last search the next time around.
3677 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3678 has_extents = true;
3679 if (first_key.objectid == (u64)-1)
3680 first_key = ins_keys[i];
3681 } else {
3682 need_find_last_extent = false;
3685 /* take a reference on file data extents so that truncates
3686 * or deletes of this inode don't have to relog the inode
3687 * again
3689 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3690 !skip_csum) {
3691 int found_type;
3692 extent = btrfs_item_ptr(src, start_slot + i,
3693 struct btrfs_file_extent_item);
3695 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3696 continue;
3698 found_type = btrfs_file_extent_type(src, extent);
3699 if (found_type == BTRFS_FILE_EXTENT_REG) {
3700 u64 ds, dl, cs, cl;
3701 ds = btrfs_file_extent_disk_bytenr(src,
3702 extent);
3703 /* ds == 0 is a hole */
3704 if (ds == 0)
3705 continue;
3707 dl = btrfs_file_extent_disk_num_bytes(src,
3708 extent);
3709 cs = btrfs_file_extent_offset(src, extent);
3710 cl = btrfs_file_extent_num_bytes(src,
3711 extent);
3712 if (btrfs_file_extent_compression(src,
3713 extent)) {
3714 cs = 0;
3715 cl = dl;
3718 ret = btrfs_lookup_csums_range(
3719 log->fs_info->csum_root,
3720 ds + cs, ds + cs + cl - 1,
3721 &ordered_sums, 0);
3722 if (ret) {
3723 btrfs_release_path(dst_path);
3724 kfree(ins_data);
3725 return ret;
3731 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3732 btrfs_release_path(dst_path);
3733 kfree(ins_data);
3736 * we have to do this after the loop above to avoid changing the
3737 * log tree while trying to change the log tree.
3739 ret = 0;
3740 while (!list_empty(&ordered_sums)) {
3741 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3742 struct btrfs_ordered_sum,
3743 list);
3744 if (!ret)
3745 ret = btrfs_csum_file_blocks(trans, log, sums);
3746 list_del(&sums->list);
3747 kfree(sums);
3750 if (!has_extents)
3751 return ret;
3753 if (need_find_last_extent && *last_extent == first_key.offset) {
3755 * We don't have any leaves between our current one and the one
3756 * we processed before that can have file extent items for our
3757 * inode (and have a generation number smaller than our current
3758 * transaction id).
3760 need_find_last_extent = false;
3764 * Because we use btrfs_search_forward we could skip leaves that were
3765 * not modified and then assume *last_extent is valid when it really
3766 * isn't. So back up to the previous leaf and read the end of the last
3767 * extent before we go and fill in holes.
3769 if (need_find_last_extent) {
3770 u64 len;
3772 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3773 if (ret < 0)
3774 return ret;
3775 if (ret)
3776 goto fill_holes;
3777 if (src_path->slots[0])
3778 src_path->slots[0]--;
3779 src = src_path->nodes[0];
3780 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3781 if (key.objectid != btrfs_ino(inode) ||
3782 key.type != BTRFS_EXTENT_DATA_KEY)
3783 goto fill_holes;
3784 extent = btrfs_item_ptr(src, src_path->slots[0],
3785 struct btrfs_file_extent_item);
3786 if (btrfs_file_extent_type(src, extent) ==
3787 BTRFS_FILE_EXTENT_INLINE) {
3788 len = btrfs_file_extent_inline_len(src,
3789 src_path->slots[0],
3790 extent);
3791 *last_extent = ALIGN(key.offset + len,
3792 log->sectorsize);
3793 } else {
3794 len = btrfs_file_extent_num_bytes(src, extent);
3795 *last_extent = key.offset + len;
3798 fill_holes:
3799 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3800 * things could have happened
3802 * 1) A merge could have happened, so we could currently be on a leaf
3803 * that holds what we were copying in the first place.
3804 * 2) A split could have happened, and now not all of the items we want
3805 * are on the same leaf.
3807 * So we need to adjust how we search for holes, we need to drop the
3808 * path and re-search for the first extent key we found, and then walk
3809 * forward until we hit the last one we copied.
3811 if (need_find_last_extent) {
3812 /* btrfs_prev_leaf could return 1 without releasing the path */
3813 btrfs_release_path(src_path);
3814 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3815 src_path, 0, 0);
3816 if (ret < 0)
3817 return ret;
3818 ASSERT(ret == 0);
3819 src = src_path->nodes[0];
3820 i = src_path->slots[0];
3821 } else {
3822 i = start_slot;
3826 * Ok, so here we need to go through and fill in any holes we may have,
3827 * to make sure that holes are punched for those areas in case they had
3828 * extents previously.
3830 while (!done) {
3831 u64 offset, len;
3832 u64 extent_end;
3834 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3835 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3836 if (ret < 0)
3837 return ret;
3838 ASSERT(ret == 0);
3839 src = src_path->nodes[0];
3840 i = 0;
3843 btrfs_item_key_to_cpu(src, &key, i);
3844 if (!btrfs_comp_cpu_keys(&key, &last_key))
3845 done = true;
3846 if (key.objectid != btrfs_ino(inode) ||
3847 key.type != BTRFS_EXTENT_DATA_KEY) {
3848 i++;
3849 continue;
3851 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3852 if (btrfs_file_extent_type(src, extent) ==
3853 BTRFS_FILE_EXTENT_INLINE) {
3854 len = btrfs_file_extent_inline_len(src, i, extent);
3855 extent_end = ALIGN(key.offset + len, log->sectorsize);
3856 } else {
3857 len = btrfs_file_extent_num_bytes(src, extent);
3858 extent_end = key.offset + len;
3860 i++;
3862 if (*last_extent == key.offset) {
3863 *last_extent = extent_end;
3864 continue;
3866 offset = *last_extent;
3867 len = key.offset - *last_extent;
3868 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3869 offset, 0, 0, len, 0, len, 0,
3870 0, 0);
3871 if (ret)
3872 break;
3873 *last_extent = extent_end;
3876 * Need to let the callers know we dropped the path so they should
3877 * re-search.
3879 if (!ret && need_find_last_extent)
3880 ret = 1;
3881 return ret;
3884 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3886 struct extent_map *em1, *em2;
3888 em1 = list_entry(a, struct extent_map, list);
3889 em2 = list_entry(b, struct extent_map, list);
3891 if (em1->start < em2->start)
3892 return -1;
3893 else if (em1->start > em2->start)
3894 return 1;
3895 return 0;
3898 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3899 struct inode *inode,
3900 struct btrfs_root *root,
3901 const struct extent_map *em,
3902 const struct list_head *logged_list,
3903 bool *ordered_io_error)
3905 struct btrfs_ordered_extent *ordered;
3906 struct btrfs_root *log = root->log_root;
3907 u64 mod_start = em->mod_start;
3908 u64 mod_len = em->mod_len;
3909 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3910 u64 csum_offset;
3911 u64 csum_len;
3912 LIST_HEAD(ordered_sums);
3913 int ret = 0;
3915 *ordered_io_error = false;
3917 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3918 em->block_start == EXTENT_MAP_HOLE)
3919 return 0;
3922 * Wait for any ordered extent that covers our extent map. If it
3923 * finishes without an error, first check and see if our csums are on
3924 * our outstanding ordered extents.
3926 list_for_each_entry(ordered, logged_list, log_list) {
3927 struct btrfs_ordered_sum *sum;
3929 if (!mod_len)
3930 break;
3932 if (ordered->file_offset + ordered->len <= mod_start ||
3933 mod_start + mod_len <= ordered->file_offset)
3934 continue;
3936 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3937 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3938 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3939 const u64 start = ordered->file_offset;
3940 const u64 end = ordered->file_offset + ordered->len - 1;
3942 WARN_ON(ordered->inode != inode);
3943 filemap_fdatawrite_range(inode->i_mapping, start, end);
3946 wait_event(ordered->wait,
3947 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3948 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3950 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3952 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3953 * i_mapping flags, so that the next fsync won't get
3954 * an outdated io error too.
3956 filemap_check_errors(inode->i_mapping);
3957 *ordered_io_error = true;
3958 break;
3961 * We are going to copy all the csums on this ordered extent, so
3962 * go ahead and adjust mod_start and mod_len in case this
3963 * ordered extent has already been logged.
3965 if (ordered->file_offset > mod_start) {
3966 if (ordered->file_offset + ordered->len >=
3967 mod_start + mod_len)
3968 mod_len = ordered->file_offset - mod_start;
3970 * If we have this case
3972 * |--------- logged extent ---------|
3973 * |----- ordered extent ----|
3975 * Just don't mess with mod_start and mod_len, we'll
3976 * just end up logging more csums than we need and it
3977 * will be ok.
3979 } else {
3980 if (ordered->file_offset + ordered->len <
3981 mod_start + mod_len) {
3982 mod_len = (mod_start + mod_len) -
3983 (ordered->file_offset + ordered->len);
3984 mod_start = ordered->file_offset +
3985 ordered->len;
3986 } else {
3987 mod_len = 0;
3991 if (skip_csum)
3992 continue;
3995 * To keep us from looping for the above case of an ordered
3996 * extent that falls inside of the logged extent.
3998 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3999 &ordered->flags))
4000 continue;
4002 list_for_each_entry(sum, &ordered->list, list) {
4003 ret = btrfs_csum_file_blocks(trans, log, sum);
4004 if (ret)
4005 break;
4009 if (*ordered_io_error || !mod_len || ret || skip_csum)
4010 return ret;
4012 if (em->compress_type) {
4013 csum_offset = 0;
4014 csum_len = max(em->block_len, em->orig_block_len);
4015 } else {
4016 csum_offset = mod_start - em->start;
4017 csum_len = mod_len;
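	/*
	 * Worked example with made-up numbers (editorial note, not from the
	 * original source): for an uncompressed extent map with
	 * em->start = 4096, mod_start = 8192 and mod_len = 4096, we get
	 *   csum_offset = 8192 - 4096 = 4096  and  csum_len = 4096,
	 * so the lookup below covers disk bytes
	 *   [em->block_start + 4096, em->block_start + 8191].
	 */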
4020 /* block start is already adjusted for the file extent offset. */
4021 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
4022 em->block_start + csum_offset,
4023 em->block_start + csum_offset +
4024 csum_len - 1, &ordered_sums, 0);
4025 if (ret)
4026 return ret;
4028 while (!list_empty(&ordered_sums)) {
4029 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4030 struct btrfs_ordered_sum,
4031 list);
4032 if (!ret)
4033 ret = btrfs_csum_file_blocks(trans, log, sums);
4034 list_del(&sums->list);
4035 kfree(sums);
4038 return ret;
4041 static int log_one_extent(struct btrfs_trans_handle *trans,
4042 struct inode *inode, struct btrfs_root *root,
4043 const struct extent_map *em,
4044 struct btrfs_path *path,
4045 const struct list_head *logged_list,
4046 struct btrfs_log_ctx *ctx)
4048 struct btrfs_root *log = root->log_root;
4049 struct btrfs_file_extent_item *fi;
4050 struct extent_buffer *leaf;
4051 struct btrfs_map_token token;
4052 struct btrfs_key key;
4053 u64 extent_offset = em->start - em->orig_start;
4054 u64 block_len;
4055 int ret;
4056 int extent_inserted = 0;
4057 bool ordered_io_err = false;
4059 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
4060 &ordered_io_err);
4061 if (ret)
4062 return ret;
4064 if (ordered_io_err) {
4065 ctx->io_err = -EIO;
4066 return 0;
4069 btrfs_init_map_token(&token);
4071 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
4072 em->start + em->len, NULL, 0, 1,
4073 sizeof(*fi), &extent_inserted);
4074 if (ret)
4075 return ret;
4077 if (!extent_inserted) {
4078 key.objectid = btrfs_ino(inode);
4079 key.type = BTRFS_EXTENT_DATA_KEY;
4080 key.offset = em->start;
4082 ret = btrfs_insert_empty_item(trans, log, path, &key,
4083 sizeof(*fi));
4084 if (ret)
4085 return ret;
4087 leaf = path->nodes[0];
4088 fi = btrfs_item_ptr(leaf, path->slots[0],
4089 struct btrfs_file_extent_item);
4091 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4092 &token);
4093 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4094 btrfs_set_token_file_extent_type(leaf, fi,
4095 BTRFS_FILE_EXTENT_PREALLOC,
4096 &token);
4097 else
4098 btrfs_set_token_file_extent_type(leaf, fi,
4099 BTRFS_FILE_EXTENT_REG,
4100 &token);
4102 block_len = max(em->block_len, em->orig_block_len);
4103 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4104 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4105 em->block_start,
4106 &token);
4107 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4108 &token);
4109 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4110 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4111 em->block_start -
4112 extent_offset, &token);
4113 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4114 &token);
4115 } else {
4116 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4117 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4118 &token);
4121 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4122 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4123 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4124 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4125 &token);
4126 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4127 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4128 btrfs_mark_buffer_dirty(leaf);
4130 btrfs_release_path(path);
4132 return ret;
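/*
 * Fast fsync path for file extents: collect the extent maps modified in the
 * current transaction, sort them and log one file extent item (plus csums)
 * for each of them through log_one_extent().
 */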
4135 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4136 struct btrfs_root *root,
4137 struct inode *inode,
4138 struct btrfs_path *path,
4139 struct list_head *logged_list,
4140 struct btrfs_log_ctx *ctx,
4141 const u64 start,
4142 const u64 end)
4144 struct extent_map *em, *n;
4145 struct list_head extents;
4146 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
4147 u64 test_gen;
4148 int ret = 0;
4149 int num = 0;
4151 INIT_LIST_HEAD(&extents);
4153 down_write(&BTRFS_I(inode)->dio_sem);
4154 write_lock(&tree->lock);
4155 test_gen = root->fs_info->last_trans_committed;
4157 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4158 list_del_init(&em->list);
4161                  * Just an arbitrary number: this can get really CPU intensive
4162                  * once we start getting a lot of extents, and once we have that
4163                  * many extents we just want to commit the transaction since it
4164                  * will be faster.
4166 if (++num > 32768) {
4167 list_del_init(&tree->modified_extents);
4168 ret = -EFBIG;
4169 goto process;
4172 if (em->generation <= test_gen)
4173 continue;
4174 /* Need a ref to keep it from getting evicted from cache */
4175 atomic_inc(&em->refs);
4176 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4177 list_add_tail(&em->list, &extents);
4178 num++;
4181 list_sort(NULL, &extents, extent_cmp);
4182 btrfs_get_logged_extents(inode, logged_list, start, end);
4184 * Some ordered extents started by fsync might have completed
4185 * before we could collect them into the list logged_list, which
4186 * means they're gone, not in our logged_list nor in the inode's
4187 * ordered tree. We want the application/user space to know an
4188 * error happened while attempting to persist file data so that
4189          * it can take proper action. If such an error happened, we leave
4190 * without writing to the log tree and the fsync must report the
4191 * file data write error and not commit the current transaction.
4193 ret = filemap_check_errors(inode->i_mapping);
4194 if (ret)
4195 ctx->io_err = ret;
4196 process:
4197 while (!list_empty(&extents)) {
4198 em = list_entry(extents.next, struct extent_map, list);
4200 list_del_init(&em->list);
4203 * If we had an error we just need to delete everybody from our
4204 * private list.
4206 if (ret) {
4207 clear_em_logging(tree, em);
4208 free_extent_map(em);
4209 continue;
4212 write_unlock(&tree->lock);
4214 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4215 ctx);
4216 write_lock(&tree->lock);
4217 clear_em_logging(tree, em);
4218 free_extent_map(em);
4220 WARN_ON(!list_empty(&extents));
4221 write_unlock(&tree->lock);
4222 up_write(&BTRFS_I(inode)->dio_sem);
4224 btrfs_release_path(path);
4225 return ret;
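/*
 * Look up the inode item of @inode in the log tree and return, via @size_ret,
 * the i_size that was last logged for it (0 if the inode item is not in the
 * log yet).
 */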
4228 static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4229 struct btrfs_path *path, u64 *size_ret)
4231 struct btrfs_key key;
4232 int ret;
4234 key.objectid = btrfs_ino(inode);
4235 key.type = BTRFS_INODE_ITEM_KEY;
4236 key.offset = 0;
4238 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4239 if (ret < 0) {
4240 return ret;
4241 } else if (ret > 0) {
4242 *size_ret = 0;
4243 } else {
4244 struct btrfs_inode_item *item;
4246 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4247 struct btrfs_inode_item);
4248 *size_ret = btrfs_inode_size(path->nodes[0], item);
4251 btrfs_release_path(path);
4252 return 0;
4256 * At the moment we always log all xattrs. This is to figure out at log replay
4257  * time which xattrs must have their deletion replayed. If an xattr is missing
4258  * in the log tree and exists in the fs/subvol tree, we delete it. This is
4259  * because if an xattr is deleted, the inode is fsynced and a power failure
4260  * happens, causing the log to be replayed the next time the fs is mounted,
4261  * then we want the xattr to not exist anymore (the same behaviour as other
4262  * filesystems with a journal: ext3/4, xfs, f2fs, etc).
4264 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4265 struct btrfs_root *root,
4266 struct inode *inode,
4267 struct btrfs_path *path,
4268 struct btrfs_path *dst_path)
4270 int ret;
4271 struct btrfs_key key;
4272 const u64 ino = btrfs_ino(inode);
4273 int ins_nr = 0;
4274 int start_slot = 0;
4276 key.objectid = ino;
4277 key.type = BTRFS_XATTR_ITEM_KEY;
4278 key.offset = 0;
4280 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4281 if (ret < 0)
4282 return ret;
4284 while (true) {
4285 int slot = path->slots[0];
4286 struct extent_buffer *leaf = path->nodes[0];
4287 int nritems = btrfs_header_nritems(leaf);
4289 if (slot >= nritems) {
4290 if (ins_nr > 0) {
4291 u64 last_extent = 0;
4293 ret = copy_items(trans, inode, dst_path, path,
4294 &last_extent, start_slot,
4295 ins_nr, 1, 0);
4296 /* can't be 1, extent items aren't processed */
4297 ASSERT(ret <= 0);
4298 if (ret < 0)
4299 return ret;
4300 ins_nr = 0;
4302 ret = btrfs_next_leaf(root, path);
4303 if (ret < 0)
4304 return ret;
4305 else if (ret > 0)
4306 break;
4307 continue;
4310 btrfs_item_key_to_cpu(leaf, &key, slot);
4311 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4312 break;
4314 if (ins_nr == 0)
4315 start_slot = slot;
4316 ins_nr++;
4317 path->slots[0]++;
4318 cond_resched();
4320 if (ins_nr > 0) {
4321 u64 last_extent = 0;
4323 ret = copy_items(trans, inode, dst_path, path,
4324 &last_extent, start_slot,
4325 ins_nr, 1, 0);
4326 /* can't be 1, extent items aren't processed */
4327 ASSERT(ret <= 0);
4328 if (ret < 0)
4329 return ret;
4332 return 0;
4336 * If the no holes feature is enabled we need to make sure any hole between the
4337 * last extent and the i_size of our inode is explicitly marked in the log. This
4338 * is to make sure that doing something like:
4340 * 1) create file with 128Kb of data
4341 * 2) truncate file to 64Kb
4342 * 3) truncate file to 256Kb
4343 * 4) fsync file
4344 * 5) <crash/power failure>
4345 * 6) mount fs and trigger log replay
4347  * Will give us a file with a size of 256Kb, where the first 64Kb of data match
4348  * what the file had in its first 64Kb at step 1 and the last 192Kb of the
4349  * file correspond to a hole. The presence of explicit holes in a log tree is
4350 * what guarantees that log replay will remove/adjust file extent items in the
4351 * fs/subvol tree.
4353  * Here we do not need to care about holes between extents, as that is already
4354  * done by copy_items(). We also only need to do this in the full sync path,
4355  * where we look up extents from the fs/subvol tree only. In the fast path case,
4356  * we walk the list of modified extent maps and if any of them represents a hole,
4357  * we insert a corresponding file extent item representing a hole in the log tree.
4359 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4360 struct btrfs_root *root,
4361 struct inode *inode,
4362 struct btrfs_path *path)
4364 int ret;
4365 struct btrfs_key key;
4366 u64 hole_start;
4367 u64 hole_size;
4368 struct extent_buffer *leaf;
4369 struct btrfs_root *log = root->log_root;
4370 const u64 ino = btrfs_ino(inode);
4371 const u64 i_size = i_size_read(inode);
4373 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4374 return 0;
4376 key.objectid = ino;
4377 key.type = BTRFS_EXTENT_DATA_KEY;
4378 key.offset = (u64)-1;
4380 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4381 ASSERT(ret != 0);
4382 if (ret < 0)
4383 return ret;
4385 ASSERT(path->slots[0] > 0);
4386 path->slots[0]--;
4387 leaf = path->nodes[0];
4388 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4390 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4391 /* inode does not have any extents */
4392 hole_start = 0;
4393 hole_size = i_size;
4394 } else {
4395 struct btrfs_file_extent_item *extent;
4396 u64 len;
4399 * If there's an extent beyond i_size, an explicit hole was
4400 * already inserted by copy_items().
4402 if (key.offset >= i_size)
4403 return 0;
4405 extent = btrfs_item_ptr(leaf, path->slots[0],
4406 struct btrfs_file_extent_item);
4408 if (btrfs_file_extent_type(leaf, extent) ==
4409 BTRFS_FILE_EXTENT_INLINE) {
4410 len = btrfs_file_extent_inline_len(leaf,
4411 path->slots[0],
4412 extent);
4413 ASSERT(len == i_size);
4414 return 0;
4417 len = btrfs_file_extent_num_bytes(leaf, extent);
4418 /* Last extent goes beyond i_size, no need to log a hole. */
4419 if (key.offset + len > i_size)
4420 return 0;
4421 hole_start = key.offset + len;
4422 hole_size = i_size - hole_start;
4424 btrfs_release_path(path);
4426 /* Last extent ends at i_size. */
4427 if (hole_size == 0)
4428 return 0;
4430 hole_size = ALIGN(hole_size, root->sectorsize);
4431 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4432 hole_size, 0, hole_size, 0, 0, 0);
4433 return ret;
4437  * When we are logging a new inode X, check whether one of its references (names)
4438  * matches a reference from some other inode Y created in a past transaction
4439  * and renamed in the current transaction. If we don't handle this, then at
4440  * log replay time we can lose inode Y (and all its files if it's a directory):
4442 * mkdir /mnt/x
4443 * echo "hello world" > /mnt/x/foobar
4444 * sync
4445 * mv /mnt/x /mnt/y
4446 * mkdir /mnt/x # or touch /mnt/x
4447 * xfs_io -c fsync /mnt/x
4448 * <power fail>
4449 * mount fs, trigger log replay
4451 * After the log replay procedure, we would lose the first directory and all its
4452 * files (file foobar).
4453 * For the case where inode Y is not a directory we simply end up losing it:
4455 * echo "123" > /mnt/foo
4456 * sync
4457 * mv /mnt/foo /mnt/bar
4458 * echo "abc" > /mnt/foo
4459 * xfs_io -c fsync /mnt/foo
4460 * <power fail>
4462 * We also need this for cases where a snapshot entry is replaced by some other
4463 * entry (file or directory) otherwise we end up with an unreplayable log due to
4464 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4465 * if it were a regular entry:
4467 * mkdir /mnt/x
4468 * btrfs subvolume snapshot /mnt /mnt/x/snap
4469 * btrfs subvolume delete /mnt/x/snap
4470 * rmdir /mnt/x
4471 * mkdir /mnt/x
4472 * fsync /mnt/x or fsync some new file inside it
4473 * <power fail>
4475 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4476 * the same transaction.
4478 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4479 const int slot,
4480 const struct btrfs_key *key,
4481 struct inode *inode,
4482 u64 *other_ino)
4484 int ret;
4485 struct btrfs_path *search_path;
4486 char *name = NULL;
4487 u32 name_len = 0;
4488 u32 item_size = btrfs_item_size_nr(eb, slot);
4489 u32 cur_offset = 0;
4490 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4492 search_path = btrfs_alloc_path();
4493 if (!search_path)
4494 return -ENOMEM;
4495 search_path->search_commit_root = 1;
4496 search_path->skip_locking = 1;
4498 while (cur_offset < item_size) {
4499 u64 parent;
4500 u32 this_name_len;
4501 u32 this_len;
4502 unsigned long name_ptr;
4503 struct btrfs_dir_item *di;
4505 if (key->type == BTRFS_INODE_REF_KEY) {
4506 struct btrfs_inode_ref *iref;
4508 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4509 parent = key->offset;
4510 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4511 name_ptr = (unsigned long)(iref + 1);
4512 this_len = sizeof(*iref) + this_name_len;
4513 } else {
4514 struct btrfs_inode_extref *extref;
4516 extref = (struct btrfs_inode_extref *)(ptr +
4517 cur_offset);
4518 parent = btrfs_inode_extref_parent(eb, extref);
4519 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4520 name_ptr = (unsigned long)&extref->name;
4521 this_len = sizeof(*extref) + this_name_len;
4524 if (this_name_len > name_len) {
4525 char *new_name;
4527 new_name = krealloc(name, this_name_len, GFP_NOFS);
4528 if (!new_name) {
4529 ret = -ENOMEM;
4530 goto out;
4532 name_len = this_name_len;
4533 name = new_name;
4536 read_extent_buffer(eb, name, name_ptr, this_name_len);
4537 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4538 search_path, parent,
4539 name, this_name_len, 0);
4540 if (di && !IS_ERR(di)) {
4541 struct btrfs_key di_key;
4543 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4544 di, &di_key);
4545 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4546 ret = 1;
4547 *other_ino = di_key.objectid;
4548 } else {
4549 ret = -EAGAIN;
4551 goto out;
4552 } else if (IS_ERR(di)) {
4553 ret = PTR_ERR(di);
4554 goto out;
4556 btrfs_release_path(search_path);
4558 cur_offset += this_len;
4560 ret = 0;
4561 out:
4562 btrfs_free_path(search_path);
4563 kfree(name);
4564 return ret;
4567 /* log a single inode in the tree log.
4568 * At least one parent directory for this inode must exist in the tree
4569 * or be logged already.
4571 * Any items from this inode changed by the current transaction are copied
4572 * to the log tree. An extra reference is taken on any extents in this
4573 * file, allowing us to avoid a whole pile of corner cases around logging
4574 * blocks that have been removed from the tree.
4576 * See LOG_INODE_ALL and related defines for a description of what inode_only
4577 * does.
4579 * This handles both files and directories.
4581 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4582 struct btrfs_root *root, struct inode *inode,
4583 int inode_only,
4584 const loff_t start,
4585 const loff_t end,
4586 struct btrfs_log_ctx *ctx)
4588 struct btrfs_path *path;
4589 struct btrfs_path *dst_path;
4590 struct btrfs_key min_key;
4591 struct btrfs_key max_key;
4592 struct btrfs_root *log = root->log_root;
4593 struct extent_buffer *src = NULL;
4594 LIST_HEAD(logged_list);
4595 u64 last_extent = 0;
4596 int err = 0;
4597 int ret;
4598 int nritems;
4599 int ins_start_slot = 0;
4600 int ins_nr;
4601 bool fast_search = false;
4602 u64 ino = btrfs_ino(inode);
4603 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4604 u64 logged_isize = 0;
4605 bool need_log_inode_item = true;
4607 path = btrfs_alloc_path();
4608 if (!path)
4609 return -ENOMEM;
4610 dst_path = btrfs_alloc_path();
4611 if (!dst_path) {
4612 btrfs_free_path(path);
4613 return -ENOMEM;
4616 min_key.objectid = ino;
4617 min_key.type = BTRFS_INODE_ITEM_KEY;
4618 min_key.offset = 0;
4620 max_key.objectid = ino;
4623 /* today the code can only do partial logging of directories */
4624 if (S_ISDIR(inode->i_mode) ||
4625 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4626 &BTRFS_I(inode)->runtime_flags) &&
4627 inode_only >= LOG_INODE_EXISTS))
4628 max_key.type = BTRFS_XATTR_ITEM_KEY;
4629 else
4630 max_key.type = (u8)-1;
4631 max_key.offset = (u64)-1;
4634 * Only run delayed items if we are a dir or a new file.
4635 * Otherwise commit the delayed inode only, which is needed in
4636 * order for the log replay code to mark inodes for link count
4637 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4639 if (S_ISDIR(inode->i_mode) ||
4640 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
4641 ret = btrfs_commit_inode_delayed_items(trans, inode);
4642 else
4643 ret = btrfs_commit_inode_delayed_inode(inode);
4645 if (ret) {
4646 btrfs_free_path(path);
4647 btrfs_free_path(dst_path);
4648 return ret;
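        /*
         * LOG_OTHER_INODE means we are logging a conflicting inode on behalf
         * of another inode's fsync, while that inode's log_mutex is already
         * held, so take our log_mutex with a nested lockdep annotation.
         */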
4651 if (inode_only == LOG_OTHER_INODE) {
4652 inode_only = LOG_INODE_EXISTS;
4653 mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
4654 SINGLE_DEPTH_NESTING);
4655 } else {
4656 mutex_lock(&BTRFS_I(inode)->log_mutex);
4660          * a brute force approach to making sure we get the most up-to-date
4661 * copies of everything.
4663 if (S_ISDIR(inode->i_mode)) {
4664 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4666 if (inode_only == LOG_INODE_EXISTS)
4667 max_key_type = BTRFS_XATTR_ITEM_KEY;
4668 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4669 } else {
4670 if (inode_only == LOG_INODE_EXISTS) {
4672 * Make sure the new inode item we write to the log has
4673 * the same isize as the current one (if it exists).
4674 * This is necessary to prevent data loss after log
4675 * replay, and also to prevent doing a wrong expanding
4676                          * truncate - e.g. create file, write 4K into offset
4677 * 0, fsync, write 4K into offset 4096, add hard link,
4678 * fsync some other file (to sync log), power fail - if
4679 * we use the inode's current i_size, after log replay
4680                          * we get an 8Kb file, with the last 4Kb extent as a hole
4681 * (zeroes), as if an expanding truncate happened,
4682 * instead of getting a file of 4Kb only.
4684 err = logged_inode_size(log, inode, path,
4685 &logged_isize);
4686 if (err)
4687 goto out_unlock;
4689 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4690 &BTRFS_I(inode)->runtime_flags)) {
4691 if (inode_only == LOG_INODE_EXISTS) {
4692 max_key.type = BTRFS_XATTR_ITEM_KEY;
4693 ret = drop_objectid_items(trans, log, path, ino,
4694 max_key.type);
4695 } else {
4696 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4697 &BTRFS_I(inode)->runtime_flags);
4698 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4699 &BTRFS_I(inode)->runtime_flags);
4700 while(1) {
4701 ret = btrfs_truncate_inode_items(trans,
4702 log, inode, 0, 0);
4703 if (ret != -EAGAIN)
4704 break;
4707 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4708 &BTRFS_I(inode)->runtime_flags) ||
4709 inode_only == LOG_INODE_EXISTS) {
4710 if (inode_only == LOG_INODE_ALL)
4711 fast_search = true;
4712 max_key.type = BTRFS_XATTR_ITEM_KEY;
4713 ret = drop_objectid_items(trans, log, path, ino,
4714 max_key.type);
4715 } else {
4716 if (inode_only == LOG_INODE_ALL)
4717 fast_search = true;
4718 goto log_extents;
4722 if (ret) {
4723 err = ret;
4724 goto out_unlock;
4727 while (1) {
4728 ins_nr = 0;
4729 ret = btrfs_search_forward(root, &min_key,
4730 path, trans->transid);
4731 if (ret < 0) {
4732 err = ret;
4733 goto out_unlock;
4735 if (ret != 0)
4736 break;
4737 again:
4738 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4739 if (min_key.objectid != ino)
4740 break;
4741 if (min_key.type > max_key.type)
4742 break;
4744 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4745 need_log_inode_item = false;
4747 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4748 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4749 BTRFS_I(inode)->generation == trans->transid) {
4750 u64 other_ino = 0;
4752 ret = btrfs_check_ref_name_override(path->nodes[0],
4753 path->slots[0],
4754 &min_key, inode,
4755 &other_ino);
4756 if (ret < 0) {
4757 err = ret;
4758 goto out_unlock;
4759 } else if (ret > 0 && ctx &&
4760 other_ino != btrfs_ino(ctx->inode)) {
4761 struct btrfs_key inode_key;
4762 struct inode *other_inode;
4764 if (ins_nr > 0) {
4765 ins_nr++;
4766 } else {
4767 ins_nr = 1;
4768 ins_start_slot = path->slots[0];
4770 ret = copy_items(trans, inode, dst_path, path,
4771 &last_extent, ins_start_slot,
4772 ins_nr, inode_only,
4773 logged_isize);
4774 if (ret < 0) {
4775 err = ret;
4776 goto out_unlock;
4778 ins_nr = 0;
4779 btrfs_release_path(path);
4780 inode_key.objectid = other_ino;
4781 inode_key.type = BTRFS_INODE_ITEM_KEY;
4782 inode_key.offset = 0;
4783 other_inode = btrfs_iget(root->fs_info->sb,
4784 &inode_key, root,
4785 NULL);
4787 * If the other inode that had a conflicting dir
4788 * entry was deleted in the current transaction,
4789 * we don't need to do more work nor fallback to
4790 * a transaction commit.
4792 if (IS_ERR(other_inode) &&
4793 PTR_ERR(other_inode) == -ENOENT) {
4794 goto next_key;
4795 } else if (IS_ERR(other_inode)) {
4796 err = PTR_ERR(other_inode);
4797 goto out_unlock;
4800 * We are safe logging the other inode without
4801 * acquiring its i_mutex as long as we log with
4802 * the LOG_INODE_EXISTS mode. We're safe against
4803 * concurrent renames of the other inode as well
4804 * because during a rename we pin the log and
4805 * update the log with the new name before we
4806 * unpin it.
4808 err = btrfs_log_inode(trans, root, other_inode,
4809 LOG_OTHER_INODE,
4810 0, LLONG_MAX, ctx);
4811 iput(other_inode);
4812 if (err)
4813 goto out_unlock;
4814 else
4815 goto next_key;
4819 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4820 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4821 if (ins_nr == 0)
4822 goto next_slot;
4823 ret = copy_items(trans, inode, dst_path, path,
4824 &last_extent, ins_start_slot,
4825 ins_nr, inode_only, logged_isize);
4826 if (ret < 0) {
4827 err = ret;
4828 goto out_unlock;
4830 ins_nr = 0;
4831 if (ret) {
4832 btrfs_release_path(path);
4833 continue;
4835 goto next_slot;
4838 src = path->nodes[0];
4839 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4840 ins_nr++;
4841 goto next_slot;
4842 } else if (!ins_nr) {
4843 ins_start_slot = path->slots[0];
4844 ins_nr = 1;
4845 goto next_slot;
4848 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4849 ins_start_slot, ins_nr, inode_only,
4850 logged_isize);
4851 if (ret < 0) {
4852 err = ret;
4853 goto out_unlock;
4855 if (ret) {
4856 ins_nr = 0;
4857 btrfs_release_path(path);
4858 continue;
4860 ins_nr = 1;
4861 ins_start_slot = path->slots[0];
4862 next_slot:
4864 nritems = btrfs_header_nritems(path->nodes[0]);
4865 path->slots[0]++;
4866 if (path->slots[0] < nritems) {
4867 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4868 path->slots[0]);
4869 goto again;
4871 if (ins_nr) {
4872 ret = copy_items(trans, inode, dst_path, path,
4873 &last_extent, ins_start_slot,
4874 ins_nr, inode_only, logged_isize);
4875 if (ret < 0) {
4876 err = ret;
4877 goto out_unlock;
4879 ret = 0;
4880 ins_nr = 0;
4882 btrfs_release_path(path);
4883 next_key:
4884 if (min_key.offset < (u64)-1) {
4885 min_key.offset++;
4886 } else if (min_key.type < max_key.type) {
4887 min_key.type++;
4888 min_key.offset = 0;
4889 } else {
4890 break;
4893 if (ins_nr) {
4894 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4895 ins_start_slot, ins_nr, inode_only,
4896 logged_isize);
4897 if (ret < 0) {
4898 err = ret;
4899 goto out_unlock;
4901 ret = 0;
4902 ins_nr = 0;
4905 btrfs_release_path(path);
4906 btrfs_release_path(dst_path);
4907 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4908 if (err)
4909 goto out_unlock;
4910 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4911 btrfs_release_path(path);
4912 btrfs_release_path(dst_path);
4913 err = btrfs_log_trailing_hole(trans, root, inode, path);
4914 if (err)
4915 goto out_unlock;
4917 log_extents:
4918 btrfs_release_path(path);
4919 btrfs_release_path(dst_path);
4920 if (need_log_inode_item) {
4921 err = log_inode_item(trans, log, dst_path, inode);
4922 if (err)
4923 goto out_unlock;
4925 if (fast_search) {
4926 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4927 &logged_list, ctx, start, end);
4928 if (ret) {
4929 err = ret;
4930 goto out_unlock;
4932 } else if (inode_only == LOG_INODE_ALL) {
4933 struct extent_map *em, *n;
4935 write_lock(&em_tree->lock);
4937 * We can't just remove every em if we're called for a ranged
4938 * fsync - that is, one that doesn't cover the whole possible
4939 * file range (0 to LLONG_MAX). This is because we can have
4940 * em's that fall outside the range we're logging and therefore
4941 * their ordered operations haven't completed yet
4942 * (btrfs_finish_ordered_io() not invoked yet). This means we
4943 * didn't get their respective file extent item in the fs/subvol
4944 * tree yet, and need to let the next fast fsync (one which
4945 * consults the list of modified extent maps) find the em so
4946 * that it logs a matching file extent item and waits for the
4947 * respective ordered operation to complete (if it's still
4948 * running).
4950 * Removing every em outside the range we're logging would make
4951 * the next fast fsync not log their matching file extent items,
4952 * therefore making us lose data after a log replay.
4954 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4955 list) {
4956 const u64 mod_end = em->mod_start + em->mod_len - 1;
4958 if (em->mod_start >= start && mod_end <= end)
4959 list_del_init(&em->list);
4961 write_unlock(&em_tree->lock);
4964 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
4965 ret = log_directory_changes(trans, root, inode, path, dst_path,
4966 ctx);
4967 if (ret) {
4968 err = ret;
4969 goto out_unlock;
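        /*
         * Record that the inode was logged in this transaction, so that a
         * subsequent fsync in the same transaction can detect it through
         * btrfs_inode_in_log() and avoid logging it again.
         */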
4973 spin_lock(&BTRFS_I(inode)->lock);
4974 BTRFS_I(inode)->logged_trans = trans->transid;
4975 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
4976 spin_unlock(&BTRFS_I(inode)->lock);
4977 out_unlock:
4978 if (unlikely(err))
4979 btrfs_put_logged_extents(&logged_list);
4980 else
4981 btrfs_submit_logged_extents(&logged_list, log);
4982 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4984 btrfs_free_path(path);
4985 btrfs_free_path(dst_path);
4986 return err;
4990  * Check if we must fall back to a transaction commit when logging an inode.
4991  * This must be called after logging the inode and is used only in the context
4992  * where fsyncing an inode requires logging some other inode - in which
4993 * case we can't lock the i_mutex of each other inode we need to log as that
4994 * can lead to deadlocks with concurrent fsync against other inodes (as we can
4995 * log inodes up or down in the hierarchy) or rename operations for example. So
4996 * we take the log_mutex of the inode after we have logged it and then check for
4997 * its last_unlink_trans value - this is safe because any task setting
4998 * last_unlink_trans must take the log_mutex and it must do this before it does
4999 * the actual unlink operation, so if we do this check before a concurrent task
5000 * sets last_unlink_trans it means we've logged a consistent version/state of
5001 * all the inode items, otherwise we are not sure and must do a transaction
5002 * commit (the concurrent task might have only updated last_unlink_trans before
5003 * we logged the inode or it might have also done the unlink).
5005 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5006 struct inode *inode)
5008 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
5009 bool ret = false;
5011 mutex_lock(&BTRFS_I(inode)->log_mutex);
5012 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
5014 * Make sure any commits to the log are forced to be full
5015 * commits.
5017 btrfs_set_log_full_commit(fs_info, trans);
5018 ret = true;
5020 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5022 return ret;
5026 * follow the dentry parent pointers up the chain and see if any
5027 * of the directories in it require a full commit before they can
5028 * be logged. Returns zero if nothing special needs to be done or 1 if
5029 * a full commit is required.
5031 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5032 struct inode *inode,
5033 struct dentry *parent,
5034 struct super_block *sb,
5035 u64 last_committed)
5037 int ret = 0;
5038 struct dentry *old_parent = NULL;
5039 struct inode *orig_inode = inode;
5042 * for regular files, if its inode is already on disk, we don't
5043 * have to worry about the parents at all. This is because
5044 * we can use the last_unlink_trans field to record renames
5045 * and other fun in this file.
5047 if (S_ISREG(inode->i_mode) &&
5048 BTRFS_I(inode)->generation <= last_committed &&
5049 BTRFS_I(inode)->last_unlink_trans <= last_committed)
5050 goto out;
5052 if (!S_ISDIR(inode->i_mode)) {
5053 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5054 goto out;
5055 inode = d_inode(parent);
5058 while (1) {
5060 * If we are logging a directory then we start with our inode,
5061 * not our parent's inode, so we need to skip setting the
5062 * logged_trans so that further down in the log code we don't
5063 * think this inode has already been logged.
5065 if (inode != orig_inode)
5066 BTRFS_I(inode)->logged_trans = trans->transid;
5067 smp_mb();
5069 if (btrfs_must_commit_transaction(trans, inode)) {
5070 ret = 1;
5071 break;
5074 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5075 break;
5077 if (IS_ROOT(parent)) {
5078 inode = d_inode(parent);
5079 if (btrfs_must_commit_transaction(trans, inode))
5080 ret = 1;
5081 break;
5084 parent = dget_parent(parent);
5085 dput(old_parent);
5086 old_parent = parent;
5087 inode = d_inode(parent);
5090 dput(old_parent);
5091 out:
5092 return ret;
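/*
 * One entry in the list of directories (identified by inode number) whose new
 * dentries still have to be logged by log_new_dir_dentries().
 */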
5095 struct btrfs_dir_list {
5096 u64 ino;
5097 struct list_head list;
5101 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5102  * details about why it is needed.
5103 * This is a recursive operation - if an existing dentry corresponds to a
5104 * directory, that directory's new entries are logged too (same behaviour as
5105 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5106 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5107 * complains about the following circular lock dependency / possible deadlock:
5109 * CPU0 CPU1
5110 * ---- ----
5111 * lock(&type->i_mutex_dir_key#3/2);
5112 * lock(sb_internal#2);
5113 * lock(&type->i_mutex_dir_key#3/2);
5114 * lock(&sb->s_type->i_mutex_key#14);
5116 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5117 * sb_start_intwrite() in btrfs_start_transaction().
5118 * Not locking i_mutex of the inodes is still safe because:
5120 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5121 * that while logging the inode new references (names) are added or removed
5122 * from the inode, leaving the logged inode item with a link count that does
5123 * not match the number of logged inode reference items. This is fine because
5124 * at log replay time we compute the real number of links and correct the
5125 * link count in the inode item (see replay_one_buffer() and
5126 * link_to_fixup_dir());
5128 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5129 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5130 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5131 * has a size that doesn't match the sum of the lengths of all the logged
5132 * names. This does not result in a problem because if a dir_item key is
5133 * logged but its matching dir_index key is not logged, at log replay time we
5134 * don't use it to replay the respective name (see replay_one_name()). On the
5135 * other hand if only the dir_index key ends up being logged, the respective
5136 * name is added to the fs/subvol tree with both the dir_item and dir_index
5137 * keys created (see replay_one_name()).
5138  * The directory's inode item with a wrong i_size is not a problem either,
5139 * since we don't use it at log replay time to set the i_size in the inode
5140 * item of the fs/subvol tree (see overwrite_item()).
5142 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5143 struct btrfs_root *root,
5144 struct inode *start_inode,
5145 struct btrfs_log_ctx *ctx)
5147 struct btrfs_root *log = root->log_root;
5148 struct btrfs_path *path;
5149 LIST_HEAD(dir_list);
5150 struct btrfs_dir_list *dir_elem;
5151 int ret = 0;
5153 path = btrfs_alloc_path();
5154 if (!path)
5155 return -ENOMEM;
5157 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5158 if (!dir_elem) {
5159 btrfs_free_path(path);
5160 return -ENOMEM;
5162 dir_elem->ino = btrfs_ino(start_inode);
5163 list_add_tail(&dir_elem->list, &dir_list);
5165 while (!list_empty(&dir_list)) {
5166 struct extent_buffer *leaf;
5167 struct btrfs_key min_key;
5168 int nritems;
5169 int i;
5171 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5172 list);
5173 if (ret)
5174 goto next_dir_inode;
5176 min_key.objectid = dir_elem->ino;
5177 min_key.type = BTRFS_DIR_ITEM_KEY;
5178 min_key.offset = 0;
5179 again:
5180 btrfs_release_path(path);
5181 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5182 if (ret < 0) {
5183 goto next_dir_inode;
5184 } else if (ret > 0) {
5185 ret = 0;
5186 goto next_dir_inode;
5189 process_leaf:
5190 leaf = path->nodes[0];
5191 nritems = btrfs_header_nritems(leaf);
5192 for (i = path->slots[0]; i < nritems; i++) {
5193 struct btrfs_dir_item *di;
5194 struct btrfs_key di_key;
5195 struct inode *di_inode;
5196 struct btrfs_dir_list *new_dir_elem;
5197 int log_mode = LOG_INODE_EXISTS;
5198 int type;
5200 btrfs_item_key_to_cpu(leaf, &min_key, i);
5201 if (min_key.objectid != dir_elem->ino ||
5202 min_key.type != BTRFS_DIR_ITEM_KEY)
5203 goto next_dir_inode;
5205 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5206 type = btrfs_dir_type(leaf, di);
5207 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5208 type != BTRFS_FT_DIR)
5209 continue;
5210 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5211 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5212 continue;
5214 btrfs_release_path(path);
5215 di_inode = btrfs_iget(root->fs_info->sb, &di_key,
5216 root, NULL);
5217 if (IS_ERR(di_inode)) {
5218 ret = PTR_ERR(di_inode);
5219 goto next_dir_inode;
5222 if (btrfs_inode_in_log(di_inode, trans->transid)) {
5223 iput(di_inode);
5224 break;
5227 ctx->log_new_dentries = false;
5228 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5229 log_mode = LOG_INODE_ALL;
5230 ret = btrfs_log_inode(trans, root, di_inode,
5231 log_mode, 0, LLONG_MAX, ctx);
5232 if (!ret &&
5233 btrfs_must_commit_transaction(trans, di_inode))
5234 ret = 1;
5235 iput(di_inode);
5236 if (ret)
5237 goto next_dir_inode;
5238 if (ctx->log_new_dentries) {
5239 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5240 GFP_NOFS);
5241 if (!new_dir_elem) {
5242 ret = -ENOMEM;
5243 goto next_dir_inode;
5245 new_dir_elem->ino = di_key.objectid;
5246 list_add_tail(&new_dir_elem->list, &dir_list);
5248 break;
5250 if (i == nritems) {
5251 ret = btrfs_next_leaf(log, path);
5252 if (ret < 0) {
5253 goto next_dir_inode;
5254 } else if (ret > 0) {
5255 ret = 0;
5256 goto next_dir_inode;
5258 goto process_leaf;
5260 if (min_key.offset < (u64)-1) {
5261 min_key.offset++;
5262 goto again;
5264 next_dir_inode:
5265 list_del(&dir_elem->list);
5266 kfree(dir_elem);
5269 btrfs_free_path(path);
5270 return ret;
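/*
 * Log, in LOG_INODE_ALL mode, every directory that still references @inode,
 * by walking the inode's BTRFS_INODE_REF_KEY / BTRFS_INODE_EXTREF_KEY items
 * in the commit root. Used after unlinks/renames so that old parent
 * directories do not end up with dangling index entries after log replay.
 */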
5273 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5274 struct inode *inode,
5275 struct btrfs_log_ctx *ctx)
5277 int ret;
5278 struct btrfs_path *path;
5279 struct btrfs_key key;
5280 struct btrfs_root *root = BTRFS_I(inode)->root;
5281 const u64 ino = btrfs_ino(inode);
5283 path = btrfs_alloc_path();
5284 if (!path)
5285 return -ENOMEM;
5286 path->skip_locking = 1;
5287 path->search_commit_root = 1;
5289 key.objectid = ino;
5290 key.type = BTRFS_INODE_REF_KEY;
5291 key.offset = 0;
5292 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5293 if (ret < 0)
5294 goto out;
5296 while (true) {
5297 struct extent_buffer *leaf = path->nodes[0];
5298 int slot = path->slots[0];
5299 u32 cur_offset = 0;
5300 u32 item_size;
5301 unsigned long ptr;
5303 if (slot >= btrfs_header_nritems(leaf)) {
5304 ret = btrfs_next_leaf(root, path);
5305 if (ret < 0)
5306 goto out;
5307 else if (ret > 0)
5308 break;
5309 continue;
5312 btrfs_item_key_to_cpu(leaf, &key, slot);
5313 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5314 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5315 break;
5317 item_size = btrfs_item_size_nr(leaf, slot);
5318 ptr = btrfs_item_ptr_offset(leaf, slot);
5319 while (cur_offset < item_size) {
5320 struct btrfs_key inode_key;
5321 struct inode *dir_inode;
5323 inode_key.type = BTRFS_INODE_ITEM_KEY;
5324 inode_key.offset = 0;
5326 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5327 struct btrfs_inode_extref *extref;
5329 extref = (struct btrfs_inode_extref *)
5330 (ptr + cur_offset);
5331 inode_key.objectid = btrfs_inode_extref_parent(
5332 leaf, extref);
5333 cur_offset += sizeof(*extref);
5334 cur_offset += btrfs_inode_extref_name_len(leaf,
5335 extref);
5336 } else {
5337 inode_key.objectid = key.offset;
5338 cur_offset = item_size;
5341 dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
5342 root, NULL);
5343 /* If parent inode was deleted, skip it. */
5344 if (IS_ERR(dir_inode))
5345 continue;
5347 if (ctx)
5348 ctx->log_new_dentries = false;
5349 ret = btrfs_log_inode(trans, root, dir_inode,
5350 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5351 if (!ret &&
5352 btrfs_must_commit_transaction(trans, dir_inode))
5353 ret = 1;
5354 if (!ret && ctx && ctx->log_new_dentries)
5355 ret = log_new_dir_dentries(trans, root,
5356 dir_inode, ctx);
5357 iput(dir_inode);
5358 if (ret)
5359 goto out;
5361 path->slots[0]++;
5363 ret = 0;
5364 out:
5365 btrfs_free_path(path);
5366 return ret;
5370  * helper function around btrfs_log_inode to make sure newly created
5371  * parent directories also end up in the log. Minimal, inode-and-backref-only
5372  * logging is done for any parent directories that are older than
5373  * the last committed transaction.
5375 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5376 struct btrfs_root *root, struct inode *inode,
5377 struct dentry *parent,
5378 const loff_t start,
5379 const loff_t end,
5380 int exists_only,
5381 struct btrfs_log_ctx *ctx)
5383 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5384 struct super_block *sb;
5385 struct dentry *old_parent = NULL;
5386 int ret = 0;
5387 u64 last_committed = root->fs_info->last_trans_committed;
5388 bool log_dentries = false;
5389 struct inode *orig_inode = inode;
5391 sb = inode->i_sb;
5393 if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
5394 ret = 1;
5395 goto end_no_trans;
5399          * If the previous transaction commit didn't complete, we have to do a
5400          * full commit ourselves.
5402 if (root->fs_info->last_trans_log_full_commit >
5403 root->fs_info->last_trans_committed) {
5404 ret = 1;
5405 goto end_no_trans;
5408 if (root != BTRFS_I(inode)->root ||
5409 btrfs_root_refs(&root->root_item) == 0) {
5410 ret = 1;
5411 goto end_no_trans;
5414 ret = check_parent_dirs_for_sync(trans, inode, parent,
5415 sb, last_committed);
5416 if (ret)
5417 goto end_no_trans;
5419 if (btrfs_inode_in_log(inode, trans->transid)) {
5420 ret = BTRFS_NO_LOG_SYNC;
5421 goto end_no_trans;
5424 ret = start_log_trans(trans, root, ctx);
5425 if (ret)
5426 goto end_no_trans;
5428 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5429 if (ret)
5430 goto end_trans;
5433 * for regular files, if its inode is already on disk, we don't
5434 * have to worry about the parents at all. This is because
5435 * we can use the last_unlink_trans field to record renames
5436 * and other fun in this file.
5438 if (S_ISREG(inode->i_mode) &&
5439 BTRFS_I(inode)->generation <= last_committed &&
5440 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
5441 ret = 0;
5442 goto end_trans;
5445 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
5446 log_dentries = true;
5449 * On unlink we must make sure all our current and old parent directory
5450 * inodes are fully logged. This is to prevent leaving dangling
5451 * directory index entries in directories that were our parents but are
5452          * not anymore. Not doing this results in the old parent directory being
5453 * impossible to delete after log replay (rmdir will always fail with
5454 * error -ENOTEMPTY).
5456 * Example 1:
5458 * mkdir testdir
5459 * touch testdir/foo
5460 * ln testdir/foo testdir/bar
5461 * sync
5462 * unlink testdir/bar
5463 * xfs_io -c fsync testdir/foo
5464 * <power failure>
5465 * mount fs, triggers log replay
5467 * If we don't log the parent directory (testdir), after log replay the
5468 * directory still has an entry pointing to the file inode using the bar
5469 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5470 * the file inode has a link count of 1.
5472 * Example 2:
5474 * mkdir testdir
5475 * touch foo
5476 * ln foo testdir/foo2
5477 * ln foo testdir/foo3
5478 * sync
5479 * unlink testdir/foo3
5480 * xfs_io -c fsync foo
5481 * <power failure>
5482 * mount fs, triggers log replay
5484          * Similarly to the first example, after log replay the parent directory
5485 * testdir still has an entry pointing to the inode file with name foo3
5486 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5487 * and has a link count of 2.
5489 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
5490 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5491 if (ret)
5492 goto end_trans;
5495 while (1) {
5496 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5497 break;
5499 inode = d_inode(parent);
5500 if (root != BTRFS_I(inode)->root)
5501 break;
5503 if (BTRFS_I(inode)->generation > last_committed) {
5504 ret = btrfs_log_inode(trans, root, inode,
5505 LOG_INODE_EXISTS,
5506 0, LLONG_MAX, ctx);
5507 if (ret)
5508 goto end_trans;
5510 if (IS_ROOT(parent))
5511 break;
5513 parent = dget_parent(parent);
5514 dput(old_parent);
5515 old_parent = parent;
5517 if (log_dentries)
5518 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5519 else
5520 ret = 0;
5521 end_trans:
5522 dput(old_parent);
5523 if (ret < 0) {
5524 btrfs_set_log_full_commit(root->fs_info, trans);
5525 ret = 1;
5528 if (ret)
5529 btrfs_remove_log_ctx(root, ctx);
5530 btrfs_end_log_trans(root);
5531 end_no_trans:
5532 return ret;
5536  * it is not safe to log the dentry if the chunk root has added new
5537 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5538 * If this returns 1, you must commit the transaction to safely get your
5539 * data on disk.
5541 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5542 struct btrfs_root *root, struct dentry *dentry,
5543 const loff_t start,
5544 const loff_t end,
5545 struct btrfs_log_ctx *ctx)
5547 struct dentry *parent = dget_parent(dentry);
5548 int ret;
5550 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
5551 start, end, 0, ctx);
5552 dput(parent);
5554 return ret;
5558  * should be called during mount to recover and replay any log trees
5559 * from the FS
5561 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5563 int ret;
5564 struct btrfs_path *path;
5565 struct btrfs_trans_handle *trans;
5566 struct btrfs_key key;
5567 struct btrfs_key found_key;
5568 struct btrfs_key tmp_key;
5569 struct btrfs_root *log;
5570 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5571 struct walk_control wc = {
5572 .process_func = process_one_buffer,
5573 .stage = 0,
5576 path = btrfs_alloc_path();
5577 if (!path)
5578 return -ENOMEM;
5580 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5582 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5583 if (IS_ERR(trans)) {
5584 ret = PTR_ERR(trans);
5585 goto error;
5588 wc.trans = trans;
5589 wc.pin = 1;
5591 ret = walk_log_tree(trans, log_root_tree, &wc);
5592 if (ret) {
5593 btrfs_handle_fs_error(fs_info, ret,
5594 "Failed to pin buffers while recovering log root tree.");
5595 goto error;
5598 again:
5599 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5600 key.offset = (u64)-1;
5601 key.type = BTRFS_ROOT_ITEM_KEY;
5603 while (1) {
5604 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5606 if (ret < 0) {
5607 btrfs_handle_fs_error(fs_info, ret,
5608 "Couldn't find tree log root.");
5609 goto error;
5611 if (ret > 0) {
5612 if (path->slots[0] == 0)
5613 break;
5614 path->slots[0]--;
5616 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5617 path->slots[0]);
5618 btrfs_release_path(path);
5619 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5620 break;
5622 log = btrfs_read_fs_root(log_root_tree, &found_key);
5623 if (IS_ERR(log)) {
5624 ret = PTR_ERR(log);
5625 btrfs_handle_fs_error(fs_info, ret,
5626 "Couldn't read tree log root.");
5627 goto error;
5630 tmp_key.objectid = found_key.offset;
5631 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5632 tmp_key.offset = (u64)-1;
5634 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5635 if (IS_ERR(wc.replay_dest)) {
5636 ret = PTR_ERR(wc.replay_dest);
5637 free_extent_buffer(log->node);
5638 free_extent_buffer(log->commit_root);
5639 kfree(log);
5640 btrfs_handle_fs_error(fs_info, ret,
5641 "Couldn't read target root for tree log recovery.");
5642 goto error;
5645 wc.replay_dest->log_root = log;
5646 btrfs_record_root_in_trans(trans, wc.replay_dest);
5647 ret = walk_log_tree(trans, log, &wc);
5649 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5650 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5651 path);
5654 key.offset = found_key.offset - 1;
5655 wc.replay_dest->log_root = NULL;
5656 free_extent_buffer(log->node);
5657 free_extent_buffer(log->commit_root);
5658 kfree(log);
5660 if (ret)
5661 goto error;
5663 if (found_key.offset == 0)
5664 break;
5666 btrfs_release_path(path);
5668 /* step one is to pin it all, step two is to replay just inodes */
5669 if (wc.pin) {
5670 wc.pin = 0;
5671 wc.process_func = replay_one_buffer;
5672 wc.stage = LOG_WALK_REPLAY_INODES;
5673 goto again;
5675 /* step three is to replay everything */
5676 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5677 wc.stage++;
5678 goto again;
5681 btrfs_free_path(path);
5683 /* step 4: commit the transaction, which also unpins the blocks */
5684 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
5685 if (ret)
5686 return ret;
5688 free_extent_buffer(log_root_tree->node);
5689 log_root_tree->log_root = NULL;
5690 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5691 kfree(log_root_tree);
5693 return 0;
5694 error:
5695 if (wc.trans)
5696 btrfs_end_transaction(wc.trans, fs_info->tree_root);
5697 btrfs_free_path(path);
5698 return ret;
5702 * there are some corner cases where we want to force a full
5703 * commit instead of allowing a directory to be logged.
5705  * They revolve around files that were unlinked from the directory, and
5706 * this function updates the parent directory so that a full commit is
5707 * properly done if it is fsync'd later after the unlinks are done.
5709 * Must be called before the unlink operations (updates to the subvolume tree,
5710 * inodes, etc) are done.
5712 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5713 struct inode *dir, struct inode *inode,
5714 int for_rename)
5717 * when we're logging a file, if it hasn't been renamed
5718 * or unlinked, and its inode is fully committed on disk,
5719 * we don't have to worry about walking up the directory chain
5720 * to log its parents.
5722 * So, we use the last_unlink_trans field to put this transid
5723 * into the file. When the file is logged we check it and
5724 * don't log the parents if the file is fully on disk.
5726 mutex_lock(&BTRFS_I(inode)->log_mutex);
5727 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5728 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5731          * if this directory was already logged, any new
5732 * names for this file/dir will get recorded
5734 smp_mb();
5735 if (BTRFS_I(dir)->logged_trans == trans->transid)
5736 return;
5739 * if the inode we're about to unlink was logged,
5740 * the log will be properly updated for any new names
5742 if (BTRFS_I(inode)->logged_trans == trans->transid)
5743 return;
5746 * when renaming files across directories, if the directory
5747          * we're unlinking from gets fsync'd later on, there's
5748 * no way to find the destination directory later and fsync it
5749 * properly. So, we have to be conservative and force commits
5750 * so the new name gets discovered.
5752 if (for_rename)
5753 goto record;
5755 /* we can safely do the unlink without any special recording */
5756 return;
5758 record:
5759 mutex_lock(&BTRFS_I(dir)->log_mutex);
5760 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5761 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5765 * Make sure that if someone attempts to fsync the parent directory of a deleted
5766 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5767 * that after replaying the log tree of the parent directory's root we will not
5768 * see the snapshot anymore and at log replay time we will not see any log tree
5769 * corresponding to the deleted snapshot's root, which could lead to replaying
5770 * it after replaying the log tree of the parent directory (which would replay
5771 * the snapshot delete operation).
5773 * Must be called before the actual snapshot destroy operation (updates to the
5774 * parent root and tree of tree roots trees, etc) are done.
5776 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5777 struct inode *dir)
5779 mutex_lock(&BTRFS_I(dir)->log_mutex);
5780 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5781 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5785 * Call this after adding a new name for a file and it will properly
5786 * update the log to reflect the new name.
5788 * It will return zero if all goes well, and it will return 1 if a
5789 * full transaction commit is required.
5791 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5792 struct inode *inode, struct inode *old_dir,
5793 struct dentry *parent)
5795 struct btrfs_root * root = BTRFS_I(inode)->root;
5798 * this will force the logging code to walk the dentry chain
5799 * up for the file
5801 if (S_ISREG(inode->i_mode))
5802 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5805          * if this inode hasn't been logged and the directory we're renaming it
5806 * from hasn't been logged, we don't need to log it
5808 if (BTRFS_I(inode)->logged_trans <=
5809 root->fs_info->last_trans_committed &&
5810 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
5811 root->fs_info->last_trans_committed))
5812 return 0;
5814 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5815 LLONG_MAX, 1, NULL);