/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if no transaction was in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen);
		if (ret)
			return ret;
	}
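
	/* during a pin-only walk, record this block's range as in use so
	 * the extent allocator will not hand it out again before replay
	 * finishes
	 */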
	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(fs_info, eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(fs_info, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(fs_info, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, slot, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record the dirty extent for qgroups: we did a
		 * shallow copy of the file extent item and skipped the
		 * normal backref update, modifying the extent tree all by
		 * ourselves.  The owner of the file extent changed from the
		 * log tree (which doesn't affect qgroups) to the fs/file
		 * tree (which does).
		 */
		ret = btrfs_qgroup_trace_extent(trans, fs_info,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;
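
		/* a disk_bytenr of zero means this extent is a hole, there
		 * is nothing to allocate or reference in the extent tree
		 */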
		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						fs_info,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);
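
			/* for compressed extents the checksums cover the
			 * whole on-disk extent; otherwise only the range
			 * this file extent item actually references
			 */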
			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;

			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans, fs_info);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
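
/*
 * Helper for add_inode_ref(): remove any back references in the
 * subvolume that conflict with the ref we are replaying.  Both old
 * style refs and extended refs are checked; any conflicting name that
 * is not also present in the log is unlinked.  Directory entries with
 * a conflicting sequence number or name are dropped as well.  Returns
 * 1 if the ref being replayed is for the root directory itself, 0 on
 * success, or a negative errno.  *search_done is set once the
 * subvolume has been searched so the caller can avoid repeating the
 * work.
 */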
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans, fs_info);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								  trans,
								  fs_info);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
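
/*
 * Pull one name out of an extended inode ref item: validate the name
 * length, copy the name into a freshly allocated buffer and return the
 * index (and, if requested, the parent objectid) stored in the ref.
 */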
static int extref_get_fields(struct extent_buffer *eb, int slot,
			     unsigned long ref_ptr, u32 *namelen, char **name,
			     u64 *index, u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}
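
/*
 * Same as extref_get_fields() but for an old style inode ref item,
 * which stores no parent objectid (that is the key offset instead).
 */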
static int ref_get_fields(struct extent_buffer *eb, int slot,
			  unsigned long ref_ptr, u32 *namelen, char **name,
			  u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
				     *namelen))
		return -EIO;

	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
						&name, &ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
					     &name, &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir),
					BTRFS_I(inode),
					name, namelen, 0, ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
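
/*
 * Record the inode as an orphan so it gets cleaned up once replay is
 * done; an orphan item may already exist, so -EEXIST is not an error.
 */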
static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}
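
/*
 * Count how many names an inode has by walking all of its extended
 * ref items; returns the number of names found or a negative errno.
 */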
static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}
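
/*
 * Count the names stored in old style inode ref items, walking them
 * from the highest key offset backwards.
 */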
static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}
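
/*
 * Walk all the orphan items in the fixup dir, fixing the link count of
 * each inode they point to and deleting each orphan item once its
 * inode has been processed.
 */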
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
				       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	struct btrfs_path *fixup_path = NULL;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(fs_info, eb, slot, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		if (ret < 0)
			break;
		ptr = (unsigned long)(di + 1);
		ptr += name_len;

		/*
		 * If this entry refers to a non-directory (directories can not
		 * have a link count > 1) and it was added in the transaction
		 * that was not committed, make sure we fixup the link count of
		 * the inode the entry points to.  Otherwise something like
		 * the following would result in a directory pointing to an
		 * inode with a wrong link count that does not account for
		 * this dir entry:
		 *
		 * mkdir testdir
		 * touch testdir/foo
		 * touch testdir/bar
		 * sync
		 *
		 * ln testdir/bar testdir/bar_link
		 * ln testdir/foo testdir/foo_link
		 * xfs_io -c "fsync" testdir/bar
		 *
		 * <power failure>
		 *
		 * mount fs, log replay happens
		 *
		 * File foo would remain with a link count of 1 when it has two
		 * entries pointing to it in the directory testdir.  This would
		 * make it impossible to ever delete the parent directory, as
		 * it would result in stale dentries that can never be deleted.
		 */
		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
			struct btrfs_key di_key;

			if (!fixup_path) {
				fixup_path = btrfs_alloc_path();
				if (!fixup_path) {
					ret = -ENOMEM;
					break;
				}
			}

			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
			ret = link_to_fixup_dir(trans, root, fixup_path,
						di_key.objectid);
			if (ret)
				break;
		}
		ret = 0;
	}
	btrfs_free_path(fixup_path);
	return ret;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
1994 * this looks for a given directory item in the log. If the directory
1995 * item is not in the log, the item is removed and the inode it points
1996 * to is unlinked
1998 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1999 struct btrfs_root *root,
2000 struct btrfs_root *log,
2001 struct btrfs_path *path,
2002 struct btrfs_path *log_path,
2003 struct inode *dir,
2004 struct btrfs_key *dir_key)
2006 struct btrfs_fs_info *fs_info = root->fs_info;
2007 int ret;
2008 struct extent_buffer *eb;
2009 int slot;
2010 u32 item_size;
2011 struct btrfs_dir_item *di;
2012 struct btrfs_dir_item *log_di;
2013 int name_len;
2014 unsigned long ptr;
2015 unsigned long ptr_end;
2016 char *name;
2017 struct inode *inode;
2018 struct btrfs_key location;
2020 again:
2021 eb = path->nodes[0];
2022 slot = path->slots[0];
2023 item_size = btrfs_item_size_nr(eb, slot);
2024 ptr = btrfs_item_ptr_offset(eb, slot);
2025 ptr_end = ptr + item_size;
2026 while (ptr < ptr_end) {
2027 di = (struct btrfs_dir_item *)ptr;
2028 if (verify_dir_item(fs_info, eb, slot, di)) {
2029 ret = -EIO;
2030 goto out;
2033 name_len = btrfs_dir_name_len(eb, di);
2034 name = kmalloc(name_len, GFP_NOFS);
2035 if (!name) {
2036 ret = -ENOMEM;
2037 goto out;
2039 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2040 name_len);
2041 log_di = NULL;
2042 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2043 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2044 dir_key->objectid,
2045 name, name_len, 0);
2046 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2047 log_di = btrfs_lookup_dir_index_item(trans, log,
2048 log_path,
2049 dir_key->objectid,
2050 dir_key->offset,
2051 name, name_len, 0);
2053 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2054 btrfs_dir_item_key_to_cpu(eb, di, &location);
2055 btrfs_release_path(path);
2056 btrfs_release_path(log_path);
2057 inode = read_one_inode(root, location.objectid);
2058 if (!inode) {
2059 kfree(name);
2060 return -EIO;
2063 ret = link_to_fixup_dir(trans, root,
2064 path, location.objectid);
2065 if (ret) {
2066 kfree(name);
2067 iput(inode);
2068 goto out;
2071 inc_nlink(inode);
2072 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2073 BTRFS_I(inode), name, name_len);
2074 if (!ret)
2075 ret = btrfs_run_delayed_items(trans, fs_info);
2076 kfree(name);
2077 iput(inode);
2078 if (ret)
2079 goto out;
2081 /* there might still be more names under this key;
2082 * check and repeat if required
2084 ret = btrfs_search_slot(NULL, root, dir_key, path,
2085 0, 0);
2086 if (ret == 0)
2087 goto again;
2088 ret = 0;
2089 goto out;
2090 } else if (IS_ERR(log_di)) {
2091 kfree(name);
2092 return PTR_ERR(log_di);
2094 btrfs_release_path(log_path);
2095 kfree(name);
2097 ptr = (unsigned long)(di + 1);
2098 ptr += name_len;
2100 ret = 0;
2101 out:
2102 btrfs_release_path(path);
2103 btrfs_release_path(log_path);
2104 return ret;
2107 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2108 struct btrfs_root *root,
2109 struct btrfs_root *log,
2110 struct btrfs_path *path,
2111 const u64 ino)
2113 struct btrfs_fs_info *fs_info = root->fs_info;
2114 struct btrfs_key search_key;
2115 struct btrfs_path *log_path;
2116 int i;
2117 int nritems;
2118 int ret;
2120 log_path = btrfs_alloc_path();
2121 if (!log_path)
2122 return -ENOMEM;
2124 search_key.objectid = ino;
2125 search_key.type = BTRFS_XATTR_ITEM_KEY;
2126 search_key.offset = 0;
2127 again:
2128 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2129 if (ret < 0)
2130 goto out;
2131 process_leaf:
2132 nritems = btrfs_header_nritems(path->nodes[0]);
2133 for (i = path->slots[0]; i < nritems; i++) {
2134 struct btrfs_key key;
2135 struct btrfs_dir_item *di;
2136 struct btrfs_dir_item *log_di;
2137 u32 total_size;
2138 u32 cur;
2140 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2141 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2142 ret = 0;
2143 goto out;
2146 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2147 total_size = btrfs_item_size_nr(path->nodes[0], i);
2148 cur = 0;
2149 while (cur < total_size) {
2150 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2151 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2152 u32 this_len = sizeof(*di) + name_len + data_len;
2153 char *name;
2155 ret = verify_dir_item(fs_info, path->nodes[0], i, di);
2156 if (ret) {
2157 ret = -EIO;
2158 goto out;
2160 name = kmalloc(name_len, GFP_NOFS);
2161 if (!name) {
2162 ret = -ENOMEM;
2163 goto out;
2165 read_extent_buffer(path->nodes[0], name,
2166 (unsigned long)(di + 1), name_len);
2168 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2169 name, name_len, 0);
2170 btrfs_release_path(log_path);
2171 if (!log_di) {
2172 /* Doesn't exist in log tree, so delete it. */
2173 btrfs_release_path(path);
2174 di = btrfs_lookup_xattr(trans, root, path, ino,
2175 name, name_len, -1);
2176 kfree(name);
2177 if (IS_ERR(di)) {
2178 ret = PTR_ERR(di);
2179 goto out;
2181 ASSERT(di);
2182 ret = btrfs_delete_one_dir_name(trans, root,
2183 path, di);
2184 if (ret)
2185 goto out;
2186 btrfs_release_path(path);
2187 search_key = key;
2188 goto again;
2190 kfree(name);
2191 if (IS_ERR(log_di)) {
2192 ret = PTR_ERR(log_di);
2193 goto out;
2195 cur += this_len;
2196 di = (struct btrfs_dir_item *)((char *)di + this_len);
2199 ret = btrfs_next_leaf(root, path);
2200 if (ret > 0)
2201 ret = 0;
2202 else if (ret == 0)
2203 goto process_leaf;
2204 out:
2205 btrfs_free_path(log_path);
2206 btrfs_release_path(path);
2207 return ret;
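/*
 * Userspace model (the record layout here is a hypothetical stand-in
 * for the real btrfs_dir_item): several xattr entries can share one
 * tree item, each record being a small header followed by name and
 * data bytes, so the walker above advances by
 * sizeof(header) + name_len + data_len per entry.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr {
	uint16_t name_len;
	uint16_t data_len;
};

static void walk_packed(const uint8_t *buf, uint32_t total)
{
	uint32_t cur = 0;

	while (cur < total) {
		struct hdr h;

		memcpy(&h, buf + cur, sizeof(h));
		printf("entry: name \"%.*s\", %u data bytes\n",
		       (int)h.name_len,
		       (const char *)(buf + cur + sizeof(h)),
		       (unsigned)h.data_len);
		cur += sizeof(h) + h.name_len + h.data_len;
	}
}

int main(void)
{
	uint8_t buf[32];
	const struct hdr h = { 4, 2 };
	uint32_t total = 0;

	/* two packed records: a 4-byte name followed by 2 data bytes */
	memcpy(buf + total, &h, sizeof(h)); total += sizeof(h);
	memcpy(buf + total, "key1ab", 6);   total += 6;
	memcpy(buf + total, &h, sizeof(h)); total += sizeof(h);
	memcpy(buf + total, "key2cd", 6);   total += 6;

	walk_packed(buf, total);
	return 0;
}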
2212 * deletion replay happens before we copy any new directory items
2213 * out of the log or out of backreferences from inodes. It
2214 * scans the log to find ranges of keys that the log is authoritative for,
2215 * and then scans the directory to find items in those ranges that are
2216 * not present in the log.
2218 * Anything we don't find in the log is unlinked and removed from the
2219 * directory.
2221 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2222 struct btrfs_root *root,
2223 struct btrfs_root *log,
2224 struct btrfs_path *path,
2225 u64 dirid, int del_all)
2227 u64 range_start;
2228 u64 range_end;
2229 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2230 int ret = 0;
2231 struct btrfs_key dir_key;
2232 struct btrfs_key found_key;
2233 struct btrfs_path *log_path;
2234 struct inode *dir;
2236 dir_key.objectid = dirid;
2237 dir_key.type = BTRFS_DIR_ITEM_KEY;
2238 log_path = btrfs_alloc_path();
2239 if (!log_path)
2240 return -ENOMEM;
2242 dir = read_one_inode(root, dirid);
2243 /* it isn't an error if the inode isn't there; that can happen
2244 * because we replay the deletes before we copy in the inode item
2245 * from the log
2247 if (!dir) {
2248 btrfs_free_path(log_path);
2249 return 0;
2251 again:
2252 range_start = 0;
2253 range_end = 0;
2254 while (1) {
2255 if (del_all)
2256 range_end = (u64)-1;
2257 else {
2258 ret = find_dir_range(log, path, dirid, key_type,
2259 &range_start, &range_end);
2260 if (ret != 0)
2261 break;
2264 dir_key.offset = range_start;
2265 while (1) {
2266 int nritems;
2267 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2268 0, 0);
2269 if (ret < 0)
2270 goto out;
2272 nritems = btrfs_header_nritems(path->nodes[0]);
2273 if (path->slots[0] >= nritems) {
2274 ret = btrfs_next_leaf(root, path);
2275 if (ret)
2276 break;
2278 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2279 path->slots[0]);
2280 if (found_key.objectid != dirid ||
2281 found_key.type != dir_key.type)
2282 goto next_type;
2284 if (found_key.offset > range_end)
2285 break;
2287 ret = check_item_in_log(trans, root, log, path,
2288 log_path, dir,
2289 &found_key);
2290 if (ret)
2291 goto out;
2292 if (found_key.offset == (u64)-1)
2293 break;
2294 dir_key.offset = found_key.offset + 1;
2296 btrfs_release_path(path);
2297 if (range_end == (u64)-1)
2298 break;
2299 range_start = range_end + 1;
2302 next_type:
2303 ret = 0;
2304 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2305 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2306 dir_key.type = BTRFS_DIR_INDEX_KEY;
2307 btrfs_release_path(path);
2308 goto again;
2310 out:
2311 btrfs_release_path(path);
2312 btrfs_free_path(log_path);
2313 iput(dir);
2314 return ret;
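/*
 * Userspace model of the nested loop above. The helpers
 * next_log_range() and check_against_log() are hypothetical stand-ins
 * for find_dir_range() and check_item_in_log(): for each key range the
 * log claims, every directory key inside it is checked against the
 * log, and del_all widens the range to the whole key space.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int next_log_range(uint64_t *start, uint64_t *end)
{
	/* pretend the log claims offsets 0..9 and nothing else */
	if (*start > 9)
		return 1;
	*start = 0;
	*end = 9;
	return 0;
}

static void check_against_log(uint64_t offset)
{
	printf("checking dir key offset %llu against the log\n",
	       (unsigned long long)offset);
}

static void replay_deletes(const uint64_t *offs, int nr, bool del_all)
{
	uint64_t range_start = 0, range_end = 0;

	while (1) {
		if (del_all)
			range_end = UINT64_MAX;
		else if (next_log_range(&range_start, &range_end))
			break;

		for (int i = 0; i < nr; i++)
			if (offs[i] >= range_start && offs[i] <= range_end)
				check_against_log(offs[i]);

		if (range_end == UINT64_MAX)
			break;
		range_start = range_end + 1;
	}
}

int main(void)
{
	const uint64_t offsets[] = { 2, 7, 15 };

	/* offset 15 lies outside every logged range, so it is never checked */
	replay_deletes(offsets, 3, false);
	return 0;
}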
2318 * the process_func used to replay items from the log tree. This
2319 * gets called in two different stages. The first stage just looks
2320 * for inodes and makes sure they are all copied into the subvolume.
2322 * The second stage copies all the other item types from the log into
2323 * the subvolume. The two-stage approach is slower, but gets rid of
2324 * lots of complexity around inodes referencing other inodes that exist
2325 * only in the log (references come from either directory items or inode
2326 * back refs).
2328 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2329 struct walk_control *wc, u64 gen)
2331 int nritems;
2332 struct btrfs_path *path;
2333 struct btrfs_root *root = wc->replay_dest;
2334 struct btrfs_key key;
2335 int level;
2336 int i;
2337 int ret;
2339 ret = btrfs_read_buffer(eb, gen);
2340 if (ret)
2341 return ret;
2343 level = btrfs_header_level(eb);
2345 if (level != 0)
2346 return 0;
2348 path = btrfs_alloc_path();
2349 if (!path)
2350 return -ENOMEM;
2352 nritems = btrfs_header_nritems(eb);
2353 for (i = 0; i < nritems; i++) {
2354 btrfs_item_key_to_cpu(eb, &key, i);
2356 /* inode keys are done during the first stage */
2357 if (key.type == BTRFS_INODE_ITEM_KEY &&
2358 wc->stage == LOG_WALK_REPLAY_INODES) {
2359 struct btrfs_inode_item *inode_item;
2360 u32 mode;
2362 inode_item = btrfs_item_ptr(eb, i,
2363 struct btrfs_inode_item);
2364 ret = replay_xattr_deletes(wc->trans, root, log,
2365 path, key.objectid);
2366 if (ret)
2367 break;
2368 mode = btrfs_inode_mode(eb, inode_item);
2369 if (S_ISDIR(mode)) {
2370 ret = replay_dir_deletes(wc->trans,
2371 root, log, path, key.objectid, 0);
2372 if (ret)
2373 break;
2375 ret = overwrite_item(wc->trans, root, path,
2376 eb, i, &key);
2377 if (ret)
2378 break;
2380 /* for regular files, make sure the corresponding
2381 * orphan item exists. Extents past the new EOF
2382 * will be truncated later by orphan cleanup.
2384 if (S_ISREG(mode)) {
2385 ret = insert_orphan_item(wc->trans, root,
2386 key.objectid);
2387 if (ret)
2388 break;
2391 ret = link_to_fixup_dir(wc->trans, root,
2392 path, key.objectid);
2393 if (ret)
2394 break;
2397 if (key.type == BTRFS_DIR_INDEX_KEY &&
2398 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2399 ret = replay_one_dir_item(wc->trans, root, path,
2400 eb, i, &key);
2401 if (ret)
2402 break;
2405 if (wc->stage < LOG_WALK_REPLAY_ALL)
2406 continue;
2408 /* these keys are simply copied */
2409 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2410 ret = overwrite_item(wc->trans, root, path,
2411 eb, i, &key);
2412 if (ret)
2413 break;
2414 } else if (key.type == BTRFS_INODE_REF_KEY ||
2415 key.type == BTRFS_INODE_EXTREF_KEY) {
2416 ret = add_inode_ref(wc->trans, root, log, path,
2417 eb, i, &key);
2418 if (ret && ret != -ENOENT)
2419 break;
2420 ret = 0;
2421 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2422 ret = replay_one_extent(wc->trans, root, path,
2423 eb, i, &key);
2424 if (ret)
2425 break;
2426 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2427 ret = replay_one_dir_item(wc->trans, root, path,
2428 eb, i, &key);
2429 if (ret)
2430 break;
2433 btrfs_free_path(path);
2434 return ret;
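/*
 * Sketch (a simplified userspace summary; names are shortened and the
 * per-key side effects, like link_to_fixup_dir() for directories, are
 * omitted) of which key types each replay stage in replay_one_buffer()
 * consumes. A leaf is walked once per stage, and keys outside the
 * active stage are skipped until a later pass.
 */
#include <stdio.h>

enum stage { REPLAY_INODES = 1, REPLAY_DIR_INDEX = 2, REPLAY_ALL = 3 };

static const char *handled_by(enum stage s)
{
	switch (s) {
	case REPLAY_INODES:
		return "INODE_ITEM (plus xattr and dir-delete replay)";
	case REPLAY_DIR_INDEX:
		return "DIR_INDEX";
	case REPLAY_ALL:
		return "XATTR_ITEM, INODE_REF/EXTREF, EXTENT_DATA, DIR_ITEM";
	}
	return "?";
}

int main(void)
{
	for (int s = REPLAY_INODES; s <= REPLAY_ALL; s++)
		printf("stage %d: %s\n", s, handled_by((enum stage)s));
	return 0;
}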
2437 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2438 struct btrfs_root *root,
2439 struct btrfs_path *path, int *level,
2440 struct walk_control *wc)
2442 struct btrfs_fs_info *fs_info = root->fs_info;
2443 u64 root_owner;
2444 u64 bytenr;
2445 u64 ptr_gen;
2446 struct extent_buffer *next;
2447 struct extent_buffer *cur;
2448 struct extent_buffer *parent;
2449 u32 blocksize;
2450 int ret = 0;
2452 WARN_ON(*level < 0);
2453 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2455 while (*level > 0) {
2456 WARN_ON(*level < 0);
2457 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2458 cur = path->nodes[*level];
2460 WARN_ON(btrfs_header_level(cur) != *level);
2462 if (path->slots[*level] >=
2463 btrfs_header_nritems(cur))
2464 break;
2466 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2467 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2468 blocksize = fs_info->nodesize;
2470 parent = path->nodes[*level];
2471 root_owner = btrfs_header_owner(parent);
2473 next = btrfs_find_create_tree_block(fs_info, bytenr);
2474 if (IS_ERR(next))
2475 return PTR_ERR(next);
2477 if (*level == 1) {
2478 ret = wc->process_func(root, next, wc, ptr_gen);
2479 if (ret) {
2480 free_extent_buffer(next);
2481 return ret;
2484 path->slots[*level]++;
2485 if (wc->free) {
2486 ret = btrfs_read_buffer(next, ptr_gen);
2487 if (ret) {
2488 free_extent_buffer(next);
2489 return ret;
2492 if (trans) {
2493 btrfs_tree_lock(next);
2494 btrfs_set_lock_blocking(next);
2495 clean_tree_block(fs_info, next);
2496 btrfs_wait_tree_block_writeback(next);
2497 btrfs_tree_unlock(next);
2498 } else {
2499 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2500 clear_extent_buffer_dirty(next);
2503 WARN_ON(root_owner !=
2504 BTRFS_TREE_LOG_OBJECTID);
2505 ret = btrfs_free_and_pin_reserved_extent(
2506 fs_info, bytenr,
2507 blocksize);
2508 if (ret) {
2509 free_extent_buffer(next);
2510 return ret;
2513 free_extent_buffer(next);
2514 continue;
2516 ret = btrfs_read_buffer(next, ptr_gen);
2517 if (ret) {
2518 free_extent_buffer(next);
2519 return ret;
2522 WARN_ON(*level <= 0);
2523 if (path->nodes[*level-1])
2524 free_extent_buffer(path->nodes[*level-1]);
2525 path->nodes[*level-1] = next;
2526 *level = btrfs_header_level(next);
2527 path->slots[*level] = 0;
2528 cond_resched();
2530 WARN_ON(*level < 0);
2531 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2533 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2535 cond_resched();
2536 return 0;
2539 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2540 struct btrfs_root *root,
2541 struct btrfs_path *path, int *level,
2542 struct walk_control *wc)
2544 struct btrfs_fs_info *fs_info = root->fs_info;
2545 u64 root_owner;
2546 int i;
2547 int slot;
2548 int ret;
2550 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2551 slot = path->slots[i];
2552 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2553 path->slots[i]++;
2554 *level = i;
2555 WARN_ON(*level == 0);
2556 return 0;
2557 } else {
2558 struct extent_buffer *parent;
2559 if (path->nodes[*level] == root->node)
2560 parent = path->nodes[*level];
2561 else
2562 parent = path->nodes[*level + 1];
2564 root_owner = btrfs_header_owner(parent);
2565 ret = wc->process_func(root, path->nodes[*level], wc,
2566 btrfs_header_generation(path->nodes[*level]));
2567 if (ret)
2568 return ret;
2570 if (wc->free) {
2571 struct extent_buffer *next;
2573 next = path->nodes[*level];
2575 if (trans) {
2576 btrfs_tree_lock(next);
2577 btrfs_set_lock_blocking(next);
2578 clean_tree_block(fs_info, next);
2579 btrfs_wait_tree_block_writeback(next);
2580 btrfs_tree_unlock(next);
2581 } else {
2582 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2583 clear_extent_buffer_dirty(next);
2586 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2587 ret = btrfs_free_and_pin_reserved_extent(
2588 fs_info,
2589 path->nodes[*level]->start,
2590 path->nodes[*level]->len);
2591 if (ret)
2592 return ret;
2594 free_extent_buffer(path->nodes[*level]);
2595 path->nodes[*level] = NULL;
2596 *level = i + 1;
2599 return 1;
2603 * drop the reference count on the tree rooted at 'log'. This traverses
2604 * the tree freeing any blocks that have a ref count of zero after being
2605 * decremented.
2607 static int walk_log_tree(struct btrfs_trans_handle *trans,
2608 struct btrfs_root *log, struct walk_control *wc)
2610 struct btrfs_fs_info *fs_info = log->fs_info;
2611 int ret = 0;
2612 int wret;
2613 int level;
2614 struct btrfs_path *path;
2615 int orig_level;
2617 path = btrfs_alloc_path();
2618 if (!path)
2619 return -ENOMEM;
2621 level = btrfs_header_level(log->node);
2622 orig_level = level;
2623 path->nodes[level] = log->node;
2624 extent_buffer_get(log->node);
2625 path->slots[level] = 0;
2627 while (1) {
2628 wret = walk_down_log_tree(trans, log, path, &level, wc);
2629 if (wret > 0)
2630 break;
2631 if (wret < 0) {
2632 ret = wret;
2633 goto out;
2636 wret = walk_up_log_tree(trans, log, path, &level, wc);
2637 if (wret > 0)
2638 break;
2639 if (wret < 0) {
2640 ret = wret;
2641 goto out;
2645 /* was the root node processed? if not, catch it here */
2646 if (path->nodes[orig_level]) {
2647 ret = wc->process_func(log, path->nodes[orig_level], wc,
2648 btrfs_header_generation(path->nodes[orig_level]));
2649 if (ret)
2650 goto out;
2651 if (wc->free) {
2652 struct extent_buffer *next;
2654 next = path->nodes[orig_level];
2656 if (trans) {
2657 btrfs_tree_lock(next);
2658 btrfs_set_lock_blocking(next);
2659 clean_tree_block(fs_info, next);
2660 btrfs_wait_tree_block_writeback(next);
2661 btrfs_tree_unlock(next);
2662 } else {
2663 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2664 clear_extent_buffer_dirty(next);
2667 WARN_ON(log->root_key.objectid !=
2668 BTRFS_TREE_LOG_OBJECTID);
2669 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2670 next->start, next->len);
2671 if (ret)
2672 goto out;
2676 out:
2677 btrfs_free_path(path);
2678 return ret;
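/*
 * Userspace model (hypothetical tiny tree, explicit stack instead of a
 * btrfs_path) of the traversal order walk_log_tree() produces: the
 * down/up loop visits leaves first and frees upper nodes only after all
 * of their children, with the root caught last, just as the code above
 * handles path->nodes[orig_level] at the end.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *child[2];
};

static void walk(struct node *root)
{
	struct node *stack[8];
	int slot[8] = { 0 };
	int level = 0;

	stack[0] = root;
	while (level >= 0) {
		struct node *cur = stack[level];

		if (slot[level] < 2 && cur->child[slot[level]]) {
			/* walk_down step: descend to the next child */
			stack[level + 1] = cur->child[slot[level]++];
			slot[level + 1] = 0;
			level++;
		} else {
			/* walk_up step: process, then pop */
			printf("process %s\n", cur->name);
			level--;
		}
	}
}

int main(void)
{
	struct node leaf1 = { "leaf1", { 0, 0 } };
	struct node leaf2 = { "leaf2", { 0, 0 } };
	struct node root  = { "root",  { &leaf1, &leaf2 } };

	walk(&root);	/* prints leaf1, leaf2, then root */
	return 0;
}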
2682 * helper function to update the item for a given subvolume's log root
2683 * in the tree of log roots
2685 static int update_log_root(struct btrfs_trans_handle *trans,
2686 struct btrfs_root *log)
2688 struct btrfs_fs_info *fs_info = log->fs_info;
2689 int ret;
2691 if (log->log_transid == 1) {
2692 /* insert root item on the first sync */
2693 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2694 &log->root_key, &log->root_item);
2695 } else {
2696 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2697 &log->root_key, &log->root_item);
2699 return ret;
2702 static void wait_log_commit(struct btrfs_root *root, int transid)
2704 DEFINE_WAIT(wait);
2705 int index = transid % 2;
2708 * we only allow two pending log transactions at a time,
2709 * so we know that if ours is more than 2 older than the
2710 * current transaction, we're done
2712 for (;;) {
2713 prepare_to_wait(&root->log_commit_wait[index],
2714 &wait, TASK_UNINTERRUPTIBLE);
2716 if (!(root->log_transid_committed < transid &&
2717 atomic_read(&root->log_commit[index])))
2718 break;
2720 mutex_unlock(&root->log_mutex);
2721 schedule();
2722 mutex_lock(&root->log_mutex);
2724 finish_wait(&root->log_commit_wait[index], &wait);
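/*
 * Illustrative sketch (userspace model, not kernel code): two log
 * transactions can be in flight at once, so per-transid commit state
 * lives in slots indexed by transid % 2. A waiter for transid N always
 * finds N's state in slot N % 2, which is why wait_log_commit() above
 * derives its index that way.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (uint64_t transid = 10; transid < 14; transid++)
		printf("log transid %llu -> commit slot %llu\n",
		       (unsigned long long)transid,
		       (unsigned long long)(transid % 2));
	return 0;
}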
2727 static void wait_for_writer(struct btrfs_root *root)
2729 DEFINE_WAIT(wait);
2731 for (;;) {
2732 prepare_to_wait(&root->log_writer_wait, &wait,
2733 TASK_UNINTERRUPTIBLE);
2734 if (!atomic_read(&root->log_writers))
2735 break;
2737 mutex_unlock(&root->log_mutex);
2738 schedule();
2739 mutex_lock(&root->log_mutex);
2741 finish_wait(&root->log_writer_wait, &wait);
2744 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2745 struct btrfs_log_ctx *ctx)
2747 if (!ctx)
2748 return;
2750 mutex_lock(&root->log_mutex);
2751 list_del_init(&ctx->list);
2752 mutex_unlock(&root->log_mutex);
2756 * Invoked with the log mutex held, or the caller must otherwise ensure
2757 * that no other task can access the list.
2759 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2760 int index, int error)
2762 struct btrfs_log_ctx *ctx;
2763 struct btrfs_log_ctx *safe;
2765 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2766 list_del_init(&ctx->list);
2767 ctx->log_ret = error;
2770 INIT_LIST_HEAD(&root->log_ctxs[index]);
2774 * btrfs_sync_log sends a given tree log down to the disk and
2775 * updates the super blocks to record it. When this call returns 0,
2776 * you know that any inodes previously logged are safely on disk.
2779 * Any other return value means you need to call btrfs_commit_transaction.
2780 * Some of the edge cases for fsyncing directories that have had unlinks
2781 * or renames done in the past mean that sometimes the only safe
2782 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2783 * that has happened.
2785 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2786 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2788 int index1;
2789 int index2;
2790 int mark;
2791 int ret;
2792 struct btrfs_fs_info *fs_info = root->fs_info;
2793 struct btrfs_root *log = root->log_root;
2794 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
2795 int log_transid = 0;
2796 struct btrfs_log_ctx root_log_ctx;
2797 struct blk_plug plug;
2799 mutex_lock(&root->log_mutex);
2800 log_transid = ctx->log_transid;
2801 if (root->log_transid_committed >= log_transid) {
2802 mutex_unlock(&root->log_mutex);
2803 return ctx->log_ret;
2806 index1 = log_transid % 2;
2807 if (atomic_read(&root->log_commit[index1])) {
2808 wait_log_commit(root, log_transid);
2809 mutex_unlock(&root->log_mutex);
2810 return ctx->log_ret;
2812 ASSERT(log_transid == root->log_transid);
2813 atomic_set(&root->log_commit[index1], 1);
2815 /* wait for previous tree log sync to complete */
2816 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2817 wait_log_commit(root, log_transid - 1);
2819 while (1) {
2820 int batch = atomic_read(&root->log_batch);
2821 /* when we're on an ssd, just kick the log commit out */
2822 if (!btrfs_test_opt(fs_info, SSD) &&
2823 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2824 mutex_unlock(&root->log_mutex);
2825 schedule_timeout_uninterruptible(1);
2826 mutex_lock(&root->log_mutex);
2828 wait_for_writer(root);
2829 if (batch == atomic_read(&root->log_batch))
2830 break;
2833 /* bail out if we need to do a full commit */
2834 if (btrfs_need_log_full_commit(fs_info, trans)) {
2835 ret = -EAGAIN;
2836 btrfs_free_logged_extents(log, log_transid);
2837 mutex_unlock(&root->log_mutex);
2838 goto out;
2841 if (log_transid % 2 == 0)
2842 mark = EXTENT_DIRTY;
2843 else
2844 mark = EXTENT_NEW;
2846 /* we start IO on all the marked extents here, but we don't actually
2847 * wait for them until later.
2849 blk_start_plug(&plug);
2850 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
2851 if (ret) {
2852 blk_finish_plug(&plug);
2853 btrfs_abort_transaction(trans, ret);
2854 btrfs_free_logged_extents(log, log_transid);
2855 btrfs_set_log_full_commit(fs_info, trans);
2856 mutex_unlock(&root->log_mutex);
2857 goto out;
2860 btrfs_set_root_node(&log->root_item, log->node);
2862 root->log_transid++;
2863 log->log_transid = root->log_transid;
2864 root->log_start_pid = 0;
2866 * IO has been started; blocks of the log tree have the WRITTEN flag set
2867 * in their headers. New modifications of the log will be written to
2868 * new positions, so it's safe to allow log writers to go in.
2870 mutex_unlock(&root->log_mutex);
2872 btrfs_init_log_ctx(&root_log_ctx, NULL);
2874 mutex_lock(&log_root_tree->log_mutex);
2875 atomic_inc(&log_root_tree->log_batch);
2876 atomic_inc(&log_root_tree->log_writers);
2878 index2 = log_root_tree->log_transid % 2;
2879 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2880 root_log_ctx.log_transid = log_root_tree->log_transid;
2882 mutex_unlock(&log_root_tree->log_mutex);
2884 ret = update_log_root(trans, log);
2886 mutex_lock(&log_root_tree->log_mutex);
2887 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2889 * Implicit memory barrier after atomic_dec_and_test
2891 if (waitqueue_active(&log_root_tree->log_writer_wait))
2892 wake_up(&log_root_tree->log_writer_wait);
2895 if (ret) {
2896 if (!list_empty(&root_log_ctx.list))
2897 list_del_init(&root_log_ctx.list);
2899 blk_finish_plug(&plug);
2900 btrfs_set_log_full_commit(fs_info, trans);
2902 if (ret != -ENOSPC) {
2903 btrfs_abort_transaction(trans, ret);
2904 mutex_unlock(&log_root_tree->log_mutex);
2905 goto out;
2907 btrfs_wait_tree_log_extents(log, mark);
2908 btrfs_free_logged_extents(log, log_transid);
2909 mutex_unlock(&log_root_tree->log_mutex);
2910 ret = -EAGAIN;
2911 goto out;
2914 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2915 blk_finish_plug(&plug);
2916 list_del_init(&root_log_ctx.list);
2917 mutex_unlock(&log_root_tree->log_mutex);
2918 ret = root_log_ctx.log_ret;
2919 goto out;
2922 index2 = root_log_ctx.log_transid % 2;
2923 if (atomic_read(&log_root_tree->log_commit[index2])) {
2924 blk_finish_plug(&plug);
2925 ret = btrfs_wait_tree_log_extents(log, mark);
2926 btrfs_wait_logged_extents(trans, log, log_transid);
2927 wait_log_commit(log_root_tree,
2928 root_log_ctx.log_transid);
2929 mutex_unlock(&log_root_tree->log_mutex);
2930 if (!ret)
2931 ret = root_log_ctx.log_ret;
2932 goto out;
2934 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2935 atomic_set(&log_root_tree->log_commit[index2], 1);
2937 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2938 wait_log_commit(log_root_tree,
2939 root_log_ctx.log_transid - 1);
2942 wait_for_writer(log_root_tree);
2945 * now that we've moved on to the tree of log tree roots,
2946 * check the full commit flag again
2948 if (btrfs_need_log_full_commit(fs_info, trans)) {
2949 blk_finish_plug(&plug);
2950 btrfs_wait_tree_log_extents(log, mark);
2951 btrfs_free_logged_extents(log, log_transid);
2952 mutex_unlock(&log_root_tree->log_mutex);
2953 ret = -EAGAIN;
2954 goto out_wake_log_root;
2957 ret = btrfs_write_marked_extents(fs_info,
2958 &log_root_tree->dirty_log_pages,
2959 EXTENT_DIRTY | EXTENT_NEW);
2960 blk_finish_plug(&plug);
2961 if (ret) {
2962 btrfs_set_log_full_commit(fs_info, trans);
2963 btrfs_abort_transaction(trans, ret);
2964 btrfs_free_logged_extents(log, log_transid);
2965 mutex_unlock(&log_root_tree->log_mutex);
2966 goto out_wake_log_root;
2968 ret = btrfs_wait_tree_log_extents(log, mark);
2969 if (!ret)
2970 ret = btrfs_wait_tree_log_extents(log_root_tree,
2971 EXTENT_NEW | EXTENT_DIRTY);
2972 if (ret) {
2973 btrfs_set_log_full_commit(fs_info, trans);
2974 btrfs_free_logged_extents(log, log_transid);
2975 mutex_unlock(&log_root_tree->log_mutex);
2976 goto out_wake_log_root;
2978 btrfs_wait_logged_extents(trans, log, log_transid);
2980 btrfs_set_super_log_root(fs_info->super_for_commit,
2981 log_root_tree->node->start);
2982 btrfs_set_super_log_root_level(fs_info->super_for_commit,
2983 btrfs_header_level(log_root_tree->node));
2985 log_root_tree->log_transid++;
2986 mutex_unlock(&log_root_tree->log_mutex);
2989 * nobody else is going to jump in and write the ctree
2990 * super here because the log_commit atomic below is protecting
2991 * us. We must be called with a transaction handle pinning
2992 * the running transaction open, so a full commit can't hop
2993 * in and cause problems either.
2995 ret = write_all_supers(fs_info, 1);
2996 if (ret) {
2997 btrfs_set_log_full_commit(fs_info, trans);
2998 btrfs_abort_transaction(trans, ret);
2999 goto out_wake_log_root;
3002 mutex_lock(&root->log_mutex);
3003 if (root->last_log_commit < log_transid)
3004 root->last_log_commit = log_transid;
3005 mutex_unlock(&root->log_mutex);
3007 out_wake_log_root:
3008 mutex_lock(&log_root_tree->log_mutex);
3009 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3011 log_root_tree->log_transid_committed++;
3012 atomic_set(&log_root_tree->log_commit[index2], 0);
3013 mutex_unlock(&log_root_tree->log_mutex);
3016 * The barrier before waitqueue_active is implied by mutex_unlock
3018 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
3019 wake_up(&log_root_tree->log_commit_wait[index2]);
3020 out:
3021 mutex_lock(&root->log_mutex);
3022 btrfs_remove_all_log_ctxs(root, index1, ret);
3023 root->log_transid_committed++;
3024 atomic_set(&root->log_commit[index1], 0);
3025 mutex_unlock(&root->log_mutex);
3028 * The barrier before waitqueue_active is implied by mutex_unlock
3030 if (waitqueue_active(&root->log_commit_wait[index1]))
3031 wake_up(&root->log_commit_wait[index1]);
3032 return ret;
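/*
 * Illustrative caller pattern (a sketch of the contract documented
 * above btrfs_sync_log(), not the actual fsync path; fsync_via_log is
 * a hypothetical name): a return of 0 means the log commit made the
 * logged inodes durable, and anything else, including -EAGAIN, means
 * only a full transaction commit is safe.
 */
static int fsync_via_log(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_log_ctx *ctx)
{
	int ret = btrfs_sync_log(trans, root, ctx);

	if (ret == 0)
		return btrfs_end_transaction(trans);	/* log was enough */

	/* fall back to committing the whole FS */
	return btrfs_commit_transaction(trans);
}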
3035 static void free_log_tree(struct btrfs_trans_handle *trans,
3036 struct btrfs_root *log)
3038 int ret;
3039 u64 start;
3040 u64 end;
3041 struct walk_control wc = {
3042 .free = 1,
3043 .process_func = process_one_buffer
3046 ret = walk_log_tree(trans, log, &wc);
3047 /* I don't think this can happen but just in case */
3048 if (ret)
3049 btrfs_abort_transaction(trans, ret);
3051 while (1) {
3052 ret = find_first_extent_bit(&log->dirty_log_pages,
3053 0, &start, &end,
3054 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
3055 NULL);
3056 if (ret)
3057 break;
3059 clear_extent_bits(&log->dirty_log_pages, start, end,
3060 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3064 * We may have short-circuited the log tree with the full commit logic
3065 * and left ordered extents on our list, so clear these out to keep us
3066 * from leaking inodes and memory.
3068 btrfs_free_logged_extents(log, 0);
3069 btrfs_free_logged_extents(log, 1);
3071 free_extent_buffer(log->node);
3072 kfree(log);
3076 * free all the extents used by the tree log. This should be called
3077 * at commit time of the full transaction
3079 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3081 if (root->log_root) {
3082 free_log_tree(trans, root->log_root);
3083 root->log_root = NULL;
3085 return 0;
3088 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3089 struct btrfs_fs_info *fs_info)
3091 if (fs_info->log_root_tree) {
3092 free_log_tree(trans, fs_info->log_root_tree);
3093 fs_info->log_root_tree = NULL;
3095 return 0;
3099 * If both a file and directory are logged, and unlinks or renames are
3100 * mixed in, we have a few interesting corners:
3102 * create file X in dir Y
3103 * link file X to X.link in dir Y
3104 * fsync file X
3105 * unlink file X but leave X.link
3106 * fsync dir Y
3108 * After a crash we would expect only X.link to exist. But file X
3109 * didn't get fsync'd again so the log has back refs for X and X.link.
3111 * We solve this by removing directory entries and inode backrefs from the
3112 * log when a file that was logged in the current transaction is
3113 * unlinked. Any later fsync will include the updated log entries, and
3114 * we'll be able to reconstruct the proper directory items from backrefs.
3116 * This optimization allows us to avoid relogging the entire inode
3117 * or the entire directory.
3119 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3120 struct btrfs_root *root,
3121 const char *name, int name_len,
3122 struct btrfs_inode *dir, u64 index)
3124 struct btrfs_root *log;
3125 struct btrfs_dir_item *di;
3126 struct btrfs_path *path;
3127 int ret;
3128 int err = 0;
3129 int bytes_del = 0;
3130 u64 dir_ino = btrfs_ino(dir);
3132 if (dir->logged_trans < trans->transid)
3133 return 0;
3135 ret = join_running_log_trans(root);
3136 if (ret)
3137 return 0;
3139 mutex_lock(&dir->log_mutex);
3141 log = root->log_root;
3142 path = btrfs_alloc_path();
3143 if (!path) {
3144 err = -ENOMEM;
3145 goto out_unlock;
3148 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3149 name, name_len, -1);
3150 if (IS_ERR(di)) {
3151 err = PTR_ERR(di);
3152 goto fail;
3154 if (di) {
3155 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3156 bytes_del += name_len;
3157 if (ret) {
3158 err = ret;
3159 goto fail;
3162 btrfs_release_path(path);
3163 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3164 index, name, name_len, -1);
3165 if (IS_ERR(di)) {
3166 err = PTR_ERR(di);
3167 goto fail;
3169 if (di) {
3170 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3171 bytes_del += name_len;
3172 if (ret) {
3173 err = ret;
3174 goto fail;
3178 /* update the directory size in the log to reflect the names
3179 * we have removed
3181 if (bytes_del) {
3182 struct btrfs_key key;
3184 key.objectid = dir_ino;
3185 key.offset = 0;
3186 key.type = BTRFS_INODE_ITEM_KEY;
3187 btrfs_release_path(path);
3189 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3190 if (ret < 0) {
3191 err = ret;
3192 goto fail;
3194 if (ret == 0) {
3195 struct btrfs_inode_item *item;
3196 u64 i_size;
3198 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3199 struct btrfs_inode_item);
3200 i_size = btrfs_inode_size(path->nodes[0], item);
3201 if (i_size > bytes_del)
3202 i_size -= bytes_del;
3203 else
3204 i_size = 0;
3205 btrfs_set_inode_size(path->nodes[0], item, i_size);
3206 btrfs_mark_buffer_dirty(path->nodes[0]);
3207 } else
3208 ret = 0;
3209 btrfs_release_path(path);
3211 fail:
3212 btrfs_free_path(path);
3213 out_unlock:
3214 mutex_unlock(&dir->log_mutex);
3215 if (ret == -ENOSPC) {
3216 btrfs_set_log_full_commit(root->fs_info, trans);
3217 ret = 0;
3218 } else if (ret < 0)
3219 btrfs_abort_transaction(trans, ret);
3221 btrfs_end_log_trans(root);
3223 return err;
3226 /* see comments for btrfs_del_dir_entries_in_log */
3227 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3228 struct btrfs_root *root,
3229 const char *name, int name_len,
3230 struct btrfs_inode *inode, u64 dirid)
3232 struct btrfs_fs_info *fs_info = root->fs_info;
3233 struct btrfs_root *log;
3234 u64 index;
3235 int ret;
3237 if (inode->logged_trans < trans->transid)
3238 return 0;
3240 ret = join_running_log_trans(root);
3241 if (ret)
3242 return 0;
3243 log = root->log_root;
3244 mutex_lock(&inode->log_mutex);
3246 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3247 dirid, &index);
3248 mutex_unlock(&inode->log_mutex);
3249 if (ret == -ENOSPC) {
3250 btrfs_set_log_full_commit(fs_info, trans);
3251 ret = 0;
3252 } else if (ret < 0 && ret != -ENOENT)
3253 btrfs_abort_transaction(trans, ret);
3254 btrfs_end_log_trans(root);
3256 return ret;
3260 * creates a range item in the log for 'dirid'. first_offset and
3261 * last_offset tell us which parts of the key space the log should
3262 * be considered authoritative for.
3264 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3265 struct btrfs_root *log,
3266 struct btrfs_path *path,
3267 int key_type, u64 dirid,
3268 u64 first_offset, u64 last_offset)
3270 int ret;
3271 struct btrfs_key key;
3272 struct btrfs_dir_log_item *item;
3274 key.objectid = dirid;
3275 key.offset = first_offset;
3276 if (key_type == BTRFS_DIR_ITEM_KEY)
3277 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3278 else
3279 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3280 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3281 if (ret)
3282 return ret;
3284 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3285 struct btrfs_dir_log_item);
3286 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3287 btrfs_mark_buffer_dirty(path->nodes[0]);
3288 btrfs_release_path(path);
3289 return 0;
3293 * log all the items included in the current transaction for a given
3294 * directory. This also creates the range items in the log tree required
3295 * to replay anything deleted before the fsync
3297 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3298 struct btrfs_root *root, struct btrfs_inode *inode,
3299 struct btrfs_path *path,
3300 struct btrfs_path *dst_path, int key_type,
3301 struct btrfs_log_ctx *ctx,
3302 u64 min_offset, u64 *last_offset_ret)
3304 struct btrfs_key min_key;
3305 struct btrfs_root *log = root->log_root;
3306 struct extent_buffer *src;
3307 int err = 0;
3308 int ret;
3309 int i;
3310 int nritems;
3311 u64 first_offset = min_offset;
3312 u64 last_offset = (u64)-1;
3313 u64 ino = btrfs_ino(inode);
3315 log = root->log_root;
3317 min_key.objectid = ino;
3318 min_key.type = key_type;
3319 min_key.offset = min_offset;
3321 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3324 * we didn't find anything from this transaction, see if there
3325 * is anything at all
3327 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3328 min_key.objectid = ino;
3329 min_key.type = key_type;
3330 min_key.offset = (u64)-1;
3331 btrfs_release_path(path);
3332 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3333 if (ret < 0) {
3334 btrfs_release_path(path);
3335 return ret;
3337 ret = btrfs_previous_item(root, path, ino, key_type);
3339 /* if ret == 0 there are items for this type,
3340 * create a range to tell us the last key of this type.
3341 * otherwise, there are no items in this directory after
3342 * *min_offset, and we create a range to indicate that.
3344 if (ret == 0) {
3345 struct btrfs_key tmp;
3346 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3347 path->slots[0]);
3348 if (key_type == tmp.type)
3349 first_offset = max(min_offset, tmp.offset) + 1;
3351 goto done;
3354 /* go backward to find any previous key */
3355 ret = btrfs_previous_item(root, path, ino, key_type);
3356 if (ret == 0) {
3357 struct btrfs_key tmp;
3358 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3359 if (key_type == tmp.type) {
3360 first_offset = tmp.offset;
3361 ret = overwrite_item(trans, log, dst_path,
3362 path->nodes[0], path->slots[0],
3363 &tmp);
3364 if (ret) {
3365 err = ret;
3366 goto done;
3370 btrfs_release_path(path);
3372 /* find the first key from this transaction again */
3373 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3374 if (WARN_ON(ret != 0))
3375 goto done;
3378 * we have a block from this transaction, log every item in it
3379 * from our directory
3381 while (1) {
3382 struct btrfs_key tmp;
3383 src = path->nodes[0];
3384 nritems = btrfs_header_nritems(src);
3385 for (i = path->slots[0]; i < nritems; i++) {
3386 struct btrfs_dir_item *di;
3388 btrfs_item_key_to_cpu(src, &min_key, i);
3390 if (min_key.objectid != ino || min_key.type != key_type)
3391 goto done;
3392 ret = overwrite_item(trans, log, dst_path, src, i,
3393 &min_key);
3394 if (ret) {
3395 err = ret;
3396 goto done;
3400 * We must make sure that when we log a directory entry,
3401 * the corresponding inode, after log replay, has a
3402 * matching link count. For example:
3404 * touch foo
3405 * mkdir mydir
3406 * sync
3407 * ln foo mydir/bar
3408 * xfs_io -c "fsync" mydir
3409 * <crash>
3410 * <mount fs and log replay>
3412 * Would result in a fsync log that, when replayed, gives our
3413 * file inode a link count of 1, but we get
3414 * two directory entries pointing to the same inode.
3415 * After removing one of the names, it would not be
3416 * possible to remove the other name, which always
3417 * resulted in stale file handle errors, and it would not
3418 * be possible to rmdir the parent directory, since
3419 * its i_size could never decrement to the value
3420 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3422 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3423 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3424 if (ctx &&
3425 (btrfs_dir_transid(src, di) == trans->transid ||
3426 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3427 tmp.type != BTRFS_ROOT_ITEM_KEY)
3428 ctx->log_new_dentries = true;
3430 path->slots[0] = nritems;
3433 * look ahead to the next item and see if it is also
3434 * from this directory and from this transaction
3436 ret = btrfs_next_leaf(root, path);
3437 if (ret == 1) {
3438 last_offset = (u64)-1;
3439 goto done;
3441 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3442 if (tmp.objectid != ino || tmp.type != key_type) {
3443 last_offset = (u64)-1;
3444 goto done;
3446 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3447 ret = overwrite_item(trans, log, dst_path,
3448 path->nodes[0], path->slots[0],
3449 &tmp);
3450 if (ret)
3451 err = ret;
3452 else
3453 last_offset = tmp.offset;
3454 goto done;
3457 done:
3458 btrfs_release_path(path);
3459 btrfs_release_path(dst_path);
3461 if (err == 0) {
3462 *last_offset_ret = last_offset;
3464 * insert the log range keys to indicate where the log
3465 * is valid
3467 ret = insert_dir_log_key(trans, log, path, key_type,
3468 ino, first_offset, last_offset);
3469 if (ret)
3470 err = ret;
3472 return err;
3476 * logging directories is very similar to logging inodes. We find all the items
3477 * from the current transaction and write them to the log.
3479 * The recovery code scans the directory in the subvolume, and if it finds a
3480 * key in the range logged that is not present in the log tree, then it means
3481 * that dir entry was unlinked during the transaction.
3483 * In order for that scan to work, we must include one key smaller than
3484 * the smallest logged by this transaction and one key larger than the largest
3485 * key logged by this transaction.
3487 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3488 struct btrfs_root *root, struct btrfs_inode *inode,
3489 struct btrfs_path *path,
3490 struct btrfs_path *dst_path,
3491 struct btrfs_log_ctx *ctx)
3493 u64 min_key;
3494 u64 max_key;
3495 int ret;
3496 int key_type = BTRFS_DIR_ITEM_KEY;
3498 again:
3499 min_key = 0;
3500 max_key = 0;
3501 while (1) {
3502 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3503 ctx, min_key, &max_key);
3504 if (ret)
3505 return ret;
3506 if (max_key == (u64)-1)
3507 break;
3508 min_key = max_key + 1;
3511 if (key_type == BTRFS_DIR_ITEM_KEY) {
3512 key_type = BTRFS_DIR_INDEX_KEY;
3513 goto again;
3515 return 0;
3519 * a helper function to drop items from the log before we relog an
3520 * inode. max_key_type indicates the highest item type to remove.
3521 * This cannot be run for file data extents because it does not
3522 * free the extents they point to.
3524 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3525 struct btrfs_root *log,
3526 struct btrfs_path *path,
3527 u64 objectid, int max_key_type)
3529 int ret;
3530 struct btrfs_key key;
3531 struct btrfs_key found_key;
3532 int start_slot;
3534 key.objectid = objectid;
3535 key.type = max_key_type;
3536 key.offset = (u64)-1;
3538 while (1) {
3539 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3540 BUG_ON(ret == 0); /* Logic error */
3541 if (ret < 0)
3542 break;
3544 if (path->slots[0] == 0)
3545 break;
3547 path->slots[0]--;
3548 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3549 path->slots[0]);
3551 if (found_key.objectid != objectid)
3552 break;
3554 found_key.offset = 0;
3555 found_key.type = 0;
3556 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3557 &start_slot);
3559 ret = btrfs_del_items(trans, log, path, start_slot,
3560 path->slots[0] - start_slot + 1);
3562 * If start slot isn't 0 then we don't need to re-search; we've
3563 * found the last guy with the objectid in this tree.
3565 if (ret || start_slot != 0)
3566 break;
3567 btrfs_release_path(path);
3569 btrfs_release_path(path);
3570 if (ret > 0)
3571 ret = 0;
3572 return ret;
3575 static void fill_inode_item(struct btrfs_trans_handle *trans,
3576 struct extent_buffer *leaf,
3577 struct btrfs_inode_item *item,
3578 struct inode *inode, int log_inode_only,
3579 u64 logged_isize)
3581 struct btrfs_map_token token;
3583 btrfs_init_map_token(&token);
3585 if (log_inode_only) {
3586 /* set the generation to zero so the recovery code
3587 * can tell the difference between a log entry
3588 * that just says 'this inode exists' and one that
3589 * says 'update this inode with these values'
3591 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3592 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3593 } else {
3594 btrfs_set_token_inode_generation(leaf, item,
3595 BTRFS_I(inode)->generation,
3596 &token);
3597 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3600 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3601 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3602 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3603 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3605 btrfs_set_token_timespec_sec(leaf, &item->atime,
3606 inode->i_atime.tv_sec, &token);
3607 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3608 inode->i_atime.tv_nsec, &token);
3610 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3611 inode->i_mtime.tv_sec, &token);
3612 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3613 inode->i_mtime.tv_nsec, &token);
3615 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3616 inode->i_ctime.tv_sec, &token);
3617 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3618 inode->i_ctime.tv_nsec, &token);
3620 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3621 &token);
3623 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3624 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3625 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3626 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3627 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
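/*
 * Sketch (a hypothetical userspace helper, not from the source) of the
 * convention fill_inode_item() sets up above: replay treats
 * generation == 0 as "this inode exists" (LOG_INODE_EXISTS) and a
 * nonzero generation as "overwrite the inode with these values"
 * (LOG_INODE_ALL).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct logged_inode {
	uint64_t generation;
	uint64_t size;
};

static bool exists_only(const struct logged_inode *li)
{
	return li->generation == 0;
}

int main(void)
{
	const struct logged_inode exists = { 0, 4096 };	/* log_inode_only */
	const struct logged_inode full   = { 42, 4096 };

	printf("exists-only: %d, full update: %d\n",
	       exists_only(&exists), exists_only(&full));
	return 0;
}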
3630 static int log_inode_item(struct btrfs_trans_handle *trans,
3631 struct btrfs_root *log, struct btrfs_path *path,
3632 struct btrfs_inode *inode)
3634 struct btrfs_inode_item *inode_item;
3635 int ret;
3637 ret = btrfs_insert_empty_item(trans, log, path,
3638 &inode->location, sizeof(*inode_item));
3639 if (ret && ret != -EEXIST)
3640 return ret;
3641 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3642 struct btrfs_inode_item);
3643 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3644 0, 0);
3645 btrfs_release_path(path);
3646 return 0;
3649 static noinline int copy_items(struct btrfs_trans_handle *trans,
3650 struct btrfs_inode *inode,
3651 struct btrfs_path *dst_path,
3652 struct btrfs_path *src_path, u64 *last_extent,
3653 int start_slot, int nr, int inode_only,
3654 u64 logged_isize)
3656 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
3657 unsigned long src_offset;
3658 unsigned long dst_offset;
3659 struct btrfs_root *log = inode->root->log_root;
3660 struct btrfs_file_extent_item *extent;
3661 struct btrfs_inode_item *inode_item;
3662 struct extent_buffer *src = src_path->nodes[0];
3663 struct btrfs_key first_key, last_key, key;
3664 int ret;
3665 struct btrfs_key *ins_keys;
3666 u32 *ins_sizes;
3667 char *ins_data;
3668 int i;
3669 struct list_head ordered_sums;
3670 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3671 bool has_extents = false;
3672 bool need_find_last_extent = true;
3673 bool done = false;
3675 INIT_LIST_HEAD(&ordered_sums);
3677 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3678 nr * sizeof(u32), GFP_NOFS);
3679 if (!ins_data)
3680 return -ENOMEM;
3682 first_key.objectid = (u64)-1;
3684 ins_sizes = (u32 *)ins_data;
3685 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3687 for (i = 0; i < nr; i++) {
3688 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3689 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3691 ret = btrfs_insert_empty_items(trans, log, dst_path,
3692 ins_keys, ins_sizes, nr);
3693 if (ret) {
3694 kfree(ins_data);
3695 return ret;
3698 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3699 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3700 dst_path->slots[0]);
3702 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3704 if (i == nr - 1)
3705 last_key = ins_keys[i];
3707 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3708 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3709 dst_path->slots[0],
3710 struct btrfs_inode_item);
3711 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3712 &inode->vfs_inode,
3713 inode_only == LOG_INODE_EXISTS,
3714 logged_isize);
3715 } else {
3716 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3717 src_offset, ins_sizes[i]);
3721 * We set need_find_last_extent here in case we know we were
3722 * processing other items and then walk into the first extent in
3723 * the inode. If we don't hit an extent then nothing changes,
3724 * we'll do the last search the next time around.
3726 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3727 has_extents = true;
3728 if (first_key.objectid == (u64)-1)
3729 first_key = ins_keys[i];
3730 } else {
3731 need_find_last_extent = false;
3734 /* take a reference on file data extents so that truncates
3735 * or deletes of this inode don't have to relog the inode
3736 * again
3738 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3739 !skip_csum) {
3740 int found_type;
3741 extent = btrfs_item_ptr(src, start_slot + i,
3742 struct btrfs_file_extent_item);
3744 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3745 continue;
3747 found_type = btrfs_file_extent_type(src, extent);
3748 if (found_type == BTRFS_FILE_EXTENT_REG) {
3749 u64 ds, dl, cs, cl;
3750 ds = btrfs_file_extent_disk_bytenr(src,
3751 extent);
3752 /* ds == 0 is a hole */
3753 if (ds == 0)
3754 continue;
3756 dl = btrfs_file_extent_disk_num_bytes(src,
3757 extent);
3758 cs = btrfs_file_extent_offset(src, extent);
3759 cl = btrfs_file_extent_num_bytes(src,
3760 extent);
3761 if (btrfs_file_extent_compression(src,
3762 extent)) {
3763 cs = 0;
3764 cl = dl;
3767 ret = btrfs_lookup_csums_range(
3768 fs_info->csum_root,
3769 ds + cs, ds + cs + cl - 1,
3770 &ordered_sums, 0);
3771 if (ret) {
3772 btrfs_release_path(dst_path);
3773 kfree(ins_data);
3774 return ret;
3780 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3781 btrfs_release_path(dst_path);
3782 kfree(ins_data);
3785 * we have to do this after the loop above to avoid changing the
3786 * log tree while trying to change the log tree.
3788 ret = 0;
3789 while (!list_empty(&ordered_sums)) {
3790 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3791 struct btrfs_ordered_sum,
3792 list);
3793 if (!ret)
3794 ret = btrfs_csum_file_blocks(trans, log, sums);
3795 list_del(&sums->list);
3796 kfree(sums);
3799 if (!has_extents)
3800 return ret;
3802 if (need_find_last_extent && *last_extent == first_key.offset) {
3804 * We don't have any leaves between our current one and the one
3805 * we processed before that can have file extent items for our
3806 * inode (and have a generation number smaller than our current
3807 * transaction id).
3809 need_find_last_extent = false;
3813 * Because we use btrfs_search_forward we could skip leaves that were
3814 * not modified and then assume *last_extent is valid when it really
3815 * isn't. So back up to the previous leaf and read the end of the last
3816 * extent before we go and fill in holes.
3818 if (need_find_last_extent) {
3819 u64 len;
3821 ret = btrfs_prev_leaf(inode->root, src_path);
3822 if (ret < 0)
3823 return ret;
3824 if (ret)
3825 goto fill_holes;
3826 if (src_path->slots[0])
3827 src_path->slots[0]--;
3828 src = src_path->nodes[0];
3829 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3830 if (key.objectid != btrfs_ino(inode) ||
3831 key.type != BTRFS_EXTENT_DATA_KEY)
3832 goto fill_holes;
3833 extent = btrfs_item_ptr(src, src_path->slots[0],
3834 struct btrfs_file_extent_item);
3835 if (btrfs_file_extent_type(src, extent) ==
3836 BTRFS_FILE_EXTENT_INLINE) {
3837 len = btrfs_file_extent_inline_len(src,
3838 src_path->slots[0],
3839 extent);
3840 *last_extent = ALIGN(key.offset + len,
3841 fs_info->sectorsize);
3842 } else {
3843 len = btrfs_file_extent_num_bytes(src, extent);
3844 *last_extent = key.offset + len;
3847 fill_holes:
3848 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3849 * things could have happened
3851 * 1) A merge could have happened, so we could currently be on a leaf
3852 * that holds what we were copying in the first place.
3853 * 2) A split could have happened, and now not all of the items we want
3854 * are on the same leaf.
3856 * So we need to adjust how we search for holes, we need to drop the
3857 * path and re-search for the first extent key we found, and then walk
3858 * forward until we hit the last one we copied.
3860 if (need_find_last_extent) {
3861 /* btrfs_prev_leaf could return 1 without releasing the path */
3862 btrfs_release_path(src_path);
3863 ret = btrfs_search_slot(NULL, inode->root, &first_key,
3864 src_path, 0, 0);
3865 if (ret < 0)
3866 return ret;
3867 ASSERT(ret == 0);
3868 src = src_path->nodes[0];
3869 i = src_path->slots[0];
3870 } else {
3871 i = start_slot;
3875 * Ok so here we need to go through and fill in any holes we may have
3876 * to make sure that holes are punched for those areas in case they had
3877 * extents previously.
3879 while (!done) {
3880 u64 offset, len;
3881 u64 extent_end;
3883 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3884 ret = btrfs_next_leaf(inode->root, src_path);
3885 if (ret < 0)
3886 return ret;
3887 ASSERT(ret == 0);
3888 src = src_path->nodes[0];
3889 i = 0;
3892 btrfs_item_key_to_cpu(src, &key, i);
3893 if (!btrfs_comp_cpu_keys(&key, &last_key))
3894 done = true;
3895 if (key.objectid != btrfs_ino(inode) ||
3896 key.type != BTRFS_EXTENT_DATA_KEY) {
3897 i++;
3898 continue;
3900 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3901 if (btrfs_file_extent_type(src, extent) ==
3902 BTRFS_FILE_EXTENT_INLINE) {
3903 len = btrfs_file_extent_inline_len(src, i, extent);
3904 extent_end = ALIGN(key.offset + len,
3905 fs_info->sectorsize);
3906 } else {
3907 len = btrfs_file_extent_num_bytes(src, extent);
3908 extent_end = key.offset + len;
3910 i++;
3912 if (*last_extent == key.offset) {
3913 *last_extent = extent_end;
3914 continue;
3916 offset = *last_extent;
3917 len = key.offset - *last_extent;
3918 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3919 offset, 0, 0, len, 0, len, 0, 0, 0);
3920 if (ret)
3921 break;
3922 *last_extent = extent_end;
3925 * Need to let the callers know we dropped the path so they should
3926 * re-search.
3928 if (!ret && need_find_last_extent)
3929 ret = 1;
3930 return ret;
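/*
 * Userspace model (hypothetical data) of the hole-filling pass above:
 * walking the logged extents in offset order, any gap between the end
 * of the previous extent and the start of the next must be logged as
 * an explicit hole so replay can punch it.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* (offset, end) pairs of file extents, sorted by offset */
	const uint64_t ext[][2] = { { 0, 4096 }, { 8192, 12288 } };
	uint64_t last_extent = 0;

	for (int i = 0; i < 2; i++) {
		if (ext[i][0] != last_extent)
			printf("log hole [%llu, %llu)\n",
			       (unsigned long long)last_extent,
			       (unsigned long long)ext[i][0]);
		last_extent = ext[i][1];
	}
	return 0;
}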
3933 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3935 struct extent_map *em1, *em2;
3937 em1 = list_entry(a, struct extent_map, list);
3938 em2 = list_entry(b, struct extent_map, list);
3940 if (em1->start < em2->start)
3941 return -1;
3942 else if (em1->start > em2->start)
3943 return 1;
3944 return 0;
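/*
 * Usage sketch: extent_cmp() has the comparator shape expected by
 * list_sort() from <linux/list_sort.h> (included at the top of this
 * file); btrfs_log_changed_extents() below presumably sorts the
 * modified extent maps by file offset with it, roughly:
 *
 *	list_sort(NULL, &extents, extent_cmp);
 */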
3947 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3948 struct inode *inode,
3949 struct btrfs_root *root,
3950 const struct extent_map *em,
3951 const struct list_head *logged_list,
3952 bool *ordered_io_error)
3954 struct btrfs_fs_info *fs_info = root->fs_info;
3955 struct btrfs_ordered_extent *ordered;
3956 struct btrfs_root *log = root->log_root;
3957 u64 mod_start = em->mod_start;
3958 u64 mod_len = em->mod_len;
3959 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3960 u64 csum_offset;
3961 u64 csum_len;
3962 LIST_HEAD(ordered_sums);
3963 int ret = 0;
3965 *ordered_io_error = false;
3967 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3968 em->block_start == EXTENT_MAP_HOLE)
3969 return 0;
3972 * Wait for any ordered extent that covers our extent map. If it
3973 * finishes without an error, first check and see if our csums are on
3974 * our outstanding ordered extents.
3976 list_for_each_entry(ordered, logged_list, log_list) {
3977 struct btrfs_ordered_sum *sum;
3979 if (!mod_len)
3980 break;
3982 if (ordered->file_offset + ordered->len <= mod_start ||
3983 mod_start + mod_len <= ordered->file_offset)
3984 continue;
3986 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3987 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3988 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3989 const u64 start = ordered->file_offset;
3990 const u64 end = ordered->file_offset + ordered->len - 1;
3992 WARN_ON(ordered->inode != inode);
3993 filemap_fdatawrite_range(inode->i_mapping, start, end);
3996 wait_event(ordered->wait,
3997 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3998 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
4000 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
4002 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
4003 * i_mapping flags, so that the next fsync won't get
4004 * an outdated io error too.
4006 filemap_check_errors(inode->i_mapping);
4007 *ordered_io_error = true;
4008 break;
4011 * We are going to copy all the csums on this ordered extent, so
4012 * go ahead and adjust mod_start and mod_len in case this
4013 * ordered extent has already been logged.
4015 if (ordered->file_offset > mod_start) {
4016 if (ordered->file_offset + ordered->len >=
4017 mod_start + mod_len)
4018 mod_len = ordered->file_offset - mod_start;
4020 * If we have this case
4022 * |--------- logged extent ---------|
4023 * |----- ordered extent ----|
4025 * Just don't mess with mod_start and mod_len, we'll
4026 * just end up logging more csums than we need and it
4027 * will be ok.
4029 } else {
4030 if (ordered->file_offset + ordered->len <
4031 mod_start + mod_len) {
4032 mod_len = (mod_start + mod_len) -
4033 (ordered->file_offset + ordered->len);
4034 mod_start = ordered->file_offset +
4035 ordered->len;
4036 } else {
4037 mod_len = 0;
4041 if (skip_csum)
4042 continue;
4045 * To keep us from looping for the above case of an ordered
4046 * extent that falls inside of the logged extent.
4048 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
4049 &ordered->flags))
4050 continue;
4052 list_for_each_entry(sum, &ordered->list, list) {
4053 ret = btrfs_csum_file_blocks(trans, log, sum);
4054 if (ret)
4055 break;
4059 if (*ordered_io_error || !mod_len || ret || skip_csum)
4060 return ret;
4062 if (em->compress_type) {
4063 csum_offset = 0;
4064 csum_len = max(em->block_len, em->orig_block_len);
4065 } else {
4066 csum_offset = mod_start - em->start;
4067 csum_len = mod_len;
4070 /* block start is already adjusted for the file extent offset. */
4071 ret = btrfs_lookup_csums_range(fs_info->csum_root,
4072 em->block_start + csum_offset,
4073 em->block_start + csum_offset +
4074 csum_len - 1, &ordered_sums, 0);
4075 if (ret)
4076 return ret;
4078 while (!list_empty(&ordered_sums)) {
4079 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4080 struct btrfs_ordered_sum,
4081 list);
4082 if (!ret)
4083 ret = btrfs_csum_file_blocks(trans, log, sums);
4084 list_del(&sums->list);
4085 kfree(sums);
4088 return ret;
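/*
 * Userspace model of the mod_start/mod_len trimming above (hypothetical
 * numbers): when an ordered extent overlaps the head or tail of the
 * logged range, the still-unhandled portion shrinks accordingly; an
 * ordered extent that covers the whole remainder zeroes it out, and one
 * strictly inside is left alone, at worst logging extra csums.
 */
#include <stdint.h>
#include <stdio.h>

static void trim(uint64_t *mod_start, uint64_t *mod_len,
		 uint64_t ord_start, uint64_t ord_len)
{
	if (ord_start > *mod_start) {
		if (ord_start + ord_len >= *mod_start + *mod_len)
			*mod_len = ord_start - *mod_start;
		/* else: ordered extent strictly inside, leave as-is */
	} else {
		if (ord_start + ord_len < *mod_start + *mod_len) {
			*mod_len = (*mod_start + *mod_len) -
				   (ord_start + ord_len);
			*mod_start = ord_start + ord_len;
		} else {
			*mod_len = 0;
		}
	}
}

int main(void)
{
	uint64_t start = 100, len = 100;	/* logged range 100..199 */

	trim(&start, &len, 150, 100);		/* ordered extent 150..249 */
	printf("remaining: start=%llu len=%llu\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}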
4091 static int log_one_extent(struct btrfs_trans_handle *trans,
4092 struct btrfs_inode *inode, struct btrfs_root *root,
4093 const struct extent_map *em,
4094 struct btrfs_path *path,
4095 const struct list_head *logged_list,
4096 struct btrfs_log_ctx *ctx)
4098 struct btrfs_root *log = root->log_root;
4099 struct btrfs_file_extent_item *fi;
4100 struct extent_buffer *leaf;
4101 struct btrfs_map_token token;
4102 struct btrfs_key key;
4103 u64 extent_offset = em->start - em->orig_start;
4104 u64 block_len;
4105 int ret;
4106 int extent_inserted = 0;
4107 bool ordered_io_err = false;
4109 ret = wait_ordered_extents(trans, &inode->vfs_inode, root, em,
4110 logged_list, &ordered_io_err);
4111 if (ret)
4112 return ret;
4114 if (ordered_io_err) {
4115 ctx->io_err = -EIO;
4116 return ctx->io_err;
4119 btrfs_init_map_token(&token);
4121 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4122 em->start + em->len, NULL, 0, 1,
4123 sizeof(*fi), &extent_inserted);
4124 if (ret)
4125 return ret;
4127 if (!extent_inserted) {
4128 key.objectid = btrfs_ino(inode);
4129 key.type = BTRFS_EXTENT_DATA_KEY;
4130 key.offset = em->start;
4132 ret = btrfs_insert_empty_item(trans, log, path, &key,
4133 sizeof(*fi));
4134 if (ret)
4135 return ret;
4137 leaf = path->nodes[0];
4138 fi = btrfs_item_ptr(leaf, path->slots[0],
4139 struct btrfs_file_extent_item);
4141 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4142 &token);
4143 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4144 btrfs_set_token_file_extent_type(leaf, fi,
4145 BTRFS_FILE_EXTENT_PREALLOC,
4146 &token);
4147 else
4148 btrfs_set_token_file_extent_type(leaf, fi,
4149 BTRFS_FILE_EXTENT_REG,
4150 &token);
4152 block_len = max(em->block_len, em->orig_block_len);
4153 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4154 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4155 em->block_start,
4156 &token);
4157 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4158 &token);
4159 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4160 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4161 em->block_start -
4162 extent_offset, &token);
4163 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4164 &token);
4165 } else {
4166 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4167 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4168 &token);
4171 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4172 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4173 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4174 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4175 &token);
4176 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4177 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4178 btrfs_mark_buffer_dirty(leaf);
4180 btrfs_release_path(path);
4182 return ret;
4185 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root,
4187 struct btrfs_inode *inode,
4188 struct btrfs_path *path,
4189 struct list_head *logged_list,
4190 struct btrfs_log_ctx *ctx,
4191 const u64 start,
4192 const u64 end)
4194 struct extent_map *em, *n;
4195 struct list_head extents;
4196 struct extent_map_tree *tree = &inode->extent_tree;
4197 u64 logged_start, logged_end;
4198 u64 test_gen;
4199 int ret = 0;
4200 int num = 0;
4202 INIT_LIST_HEAD(&extents);
4204 down_write(&inode->dio_sem);
4205 write_lock(&tree->lock);
4206 test_gen = root->fs_info->last_trans_committed;
4207 logged_start = start;
4208 logged_end = end;
4210 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4211 list_del_init(&em->list);
4213 * Just an arbitrary number: this can get really CPU intensive
4214 * once we start getting a lot of extents, and once we have that
4215 * many extents we just want to commit the transaction since it
4216 * will be faster.
4218 if (++num > 32768) {
4219 list_del_init(&tree->modified_extents);
4220 ret = -EFBIG;
4221 goto process;
4224 if (em->generation <= test_gen)
4225 continue;
4227 if (em->start < logged_start)
4228 logged_start = em->start;
4229 if ((em->start + em->len - 1) > logged_end)
4230 logged_end = em->start + em->len - 1;
4232 /* Need a ref to keep it from getting evicted from cache */
4233 refcount_inc(&em->refs);
4234 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4235 list_add_tail(&em->list, &extents);
4236 num++;
4239 list_sort(NULL, &extents, extent_cmp);
4240 btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
4242 * Some ordered extents started by fsync might have completed
4243 * before we could collect them into the list logged_list, which
4244 * means they're gone, not in our logged_list nor in the inode's
4245 * ordered tree. We want the application/user space to know an
4246 * error happened while attempting to persist file data so that
4247 * it can take proper action. If such error happened, we leave
4248 * without writing to the log tree and the fsync must report the
4249 * file data write error and not commit the current transaction.
4251 ret = filemap_check_errors(inode->vfs_inode.i_mapping);
4252 if (ret)
4253 ctx->io_err = ret;
4254 process:
4255 while (!list_empty(&extents)) {
4256 em = list_entry(extents.next, struct extent_map, list);
4258 list_del_init(&em->list);
4261 * If we had an error we just need to delete everybody from our
4262 * private list.
4264 if (ret) {
4265 clear_em_logging(tree, em);
4266 free_extent_map(em);
4267 continue;
4270 write_unlock(&tree->lock);
4272 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4273 ctx);
4274 write_lock(&tree->lock);
4275 clear_em_logging(tree, em);
4276 free_extent_map(em);
4278 WARN_ON(!list_empty(&extents));
4279 write_unlock(&tree->lock);
4280 up_write(&inode->dio_sem);
4282 btrfs_release_path(path);
4283 return ret;
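/*
 * Editor's note: the function above uses a classic pattern - take the tree
 * lock, move candidates onto a private list while holding a reference, then
 * release the lock around the expensive per-item work.  Simplified userspace
 * sketch with a pthread mutex; names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared[4] = { 1, 2, 3, 4 };
static int nr_shared = 4;

int main(void)
{
	int mine[4], nr = 0, i;

	pthread_mutex_lock(&lock);
	while (nr_shared > 0)			/* collect under the lock */
		mine[nr++] = shared[--nr_shared];
	pthread_mutex_unlock(&lock);

	for (i = 0; i < nr; i++)		/* process without the lock */
		printf("log extent %d\n", mine[i]);
	return 0;
}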
4286 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4287 struct btrfs_path *path, u64 *size_ret)
4289 struct btrfs_key key;
4290 int ret;
4292 key.objectid = btrfs_ino(inode);
4293 key.type = BTRFS_INODE_ITEM_KEY;
4294 key.offset = 0;
4296 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4297 if (ret < 0) {
4298 return ret;
4299 } else if (ret > 0) {
4300 *size_ret = 0;
4301 } else {
4302 struct btrfs_inode_item *item;
4304 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4305 struct btrfs_inode_item);
4306 *size_ret = btrfs_inode_size(path->nodes[0], item);
4309 btrfs_release_path(path);
4310 return 0;
4314 * At the moment we always log all xattrs. This is to figure out at log replay
4315 * time which xattrs must have their deletion replayed. If an xattr is missing
4316 * in the log tree but exists in the fs/subvol tree, we delete it. This is
4317 * because if an xattr is deleted, the inode fsynced, and a power failure then
4318 * causes the log to be replayed the next time the fs is mounted, we want the
4319 * xattr to no longer exist (same behaviour as other journaled filesystems:
4320 * ext3/4, xfs, f2fs, etc). A userspace sketch of this follows the function.
4322 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4323 struct btrfs_root *root,
4324 struct btrfs_inode *inode,
4325 struct btrfs_path *path,
4326 struct btrfs_path *dst_path)
4328 int ret;
4329 struct btrfs_key key;
4330 const u64 ino = btrfs_ino(inode);
4331 int ins_nr = 0;
4332 int start_slot = 0;
4334 key.objectid = ino;
4335 key.type = BTRFS_XATTR_ITEM_KEY;
4336 key.offset = 0;
4338 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4339 if (ret < 0)
4340 return ret;
4342 while (true) {
4343 int slot = path->slots[0];
4344 struct extent_buffer *leaf = path->nodes[0];
4345 int nritems = btrfs_header_nritems(leaf);
4347 if (slot >= nritems) {
4348 if (ins_nr > 0) {
4349 u64 last_extent = 0;
4351 ret = copy_items(trans, inode, dst_path, path,
4352 &last_extent, start_slot,
4353 ins_nr, 1, 0);
4354 /* can't be 1, extent items aren't processed */
4355 ASSERT(ret <= 0);
4356 if (ret < 0)
4357 return ret;
4358 ins_nr = 0;
4360 ret = btrfs_next_leaf(root, path);
4361 if (ret < 0)
4362 return ret;
4363 else if (ret > 0)
4364 break;
4365 continue;
4368 btrfs_item_key_to_cpu(leaf, &key, slot);
4369 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4370 break;
4372 if (ins_nr == 0)
4373 start_slot = slot;
4374 ins_nr++;
4375 path->slots[0]++;
4376 cond_resched();
4378 if (ins_nr > 0) {
4379 u64 last_extent = 0;
4381 ret = copy_items(trans, inode, dst_path, path,
4382 &last_extent, start_slot,
4383 ins_nr, 1, 0);
4384 /* can't be 1, extent items aren't processed */
4385 ASSERT(ret <= 0);
4386 if (ret < 0)
4387 return ret;
4390 return 0;
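/*
 * Editor's note: a userspace sketch (illustrative only, not kernel code) of
 * the deleted-xattr scenario described in the comment before this function:
 * remove an xattr, fsync the inode, and after a crash plus log replay the
 * xattr must stay gone.  Error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <sys/xattr.h>
#include <unistd.h>

int main(void)
{
	int fd = open("file", O_CREAT | O_RDWR, 0644);

	fsetxattr(fd, "user.test", "v", 1, 0);	/* add the xattr */
	fsync(fd);				/* xattr reaches the log */
	fremovexattr(fd, "user.test");		/* delete it */
	fsync(fd);				/* deletion must be replayable */
	/* <power failure>: after log replay, user.test must not exist */
	close(fd);
	return 0;
}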
4394 * If the no holes feature is enabled we need to make sure any hole between the
4395 * last extent and the i_size of our inode is explicitly marked in the log. This
4396 * is to make sure that doing something like:
4398 * 1) create file with 128Kb of data
4399 * 2) truncate file to 64Kb
4400 * 3) truncate file to 256Kb
4401 * 4) fsync file
4402 * 5) <crash/power failure>
4403 * 6) mount fs and trigger log replay
4405 * Will give us a file with a size of 256Kb, where the first 64Kb of data match
4406 * what the file had at step 1 and the last 192Kb of the file correspond to a
4407 * hole. The presence of explicit holes in a log tree is what guarantees that
4408 * log replay will remove/adjust file extent items in the fs/subvol tree. (A
4409 * userspace repro sketch follows this function.)
4411 * Here we do not need to care about holes between extents, as that is already
4412 * done by copy_items(). We also only need to do this in the full sync path,
4413 * where we look up extents from the fs/subvol tree only. In the fast path
4414 * case, we walk the list of modified extent maps and, if any represents a
4415 * hole, we insert a corresponding extent representing a hole in the log tree.
4417 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4418 struct btrfs_root *root,
4419 struct btrfs_inode *inode,
4420 struct btrfs_path *path)
4422 struct btrfs_fs_info *fs_info = root->fs_info;
4423 int ret;
4424 struct btrfs_key key;
4425 u64 hole_start;
4426 u64 hole_size;
4427 struct extent_buffer *leaf;
4428 struct btrfs_root *log = root->log_root;
4429 const u64 ino = btrfs_ino(inode);
4430 const u64 i_size = i_size_read(&inode->vfs_inode);
4432 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4433 return 0;
4435 key.objectid = ino;
4436 key.type = BTRFS_EXTENT_DATA_KEY;
4437 key.offset = (u64)-1;
4439 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4440 ASSERT(ret != 0);
4441 if (ret < 0)
4442 return ret;
4444 ASSERT(path->slots[0] > 0);
4445 path->slots[0]--;
4446 leaf = path->nodes[0];
4447 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4449 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4450 /* inode does not have any extents */
4451 hole_start = 0;
4452 hole_size = i_size;
4453 } else {
4454 struct btrfs_file_extent_item *extent;
4455 u64 len;
4458 * If there's an extent beyond i_size, an explicit hole was
4459 * already inserted by copy_items().
4461 if (key.offset >= i_size)
4462 return 0;
4464 extent = btrfs_item_ptr(leaf, path->slots[0],
4465 struct btrfs_file_extent_item);
4467 if (btrfs_file_extent_type(leaf, extent) ==
4468 BTRFS_FILE_EXTENT_INLINE) {
4469 len = btrfs_file_extent_inline_len(leaf,
4470 path->slots[0],
4471 extent);
4472 ASSERT(len == i_size ||
4473 (len == fs_info->sectorsize &&
4474 btrfs_file_extent_compression(leaf, extent) !=
4475 BTRFS_COMPRESS_NONE));
4476 return 0;
4479 len = btrfs_file_extent_num_bytes(leaf, extent);
4480 /* Last extent goes beyond i_size, no need to log a hole. */
4481 if (key.offset + len > i_size)
4482 return 0;
4483 hole_start = key.offset + len;
4484 hole_size = i_size - hole_start;
4486 btrfs_release_path(path);
4488 /* Last extent ends at i_size. */
4489 if (hole_size == 0)
4490 return 0;
4492 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4493 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4494 hole_size, 0, hole_size, 0, 0, 0);
4495 return ret;
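/*
 * Editor's note: userspace repro sketch of steps 1-4 from the comment before
 * btrfs_log_trailing_hole(); illustrative only, no error handling.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("file", O_CREAT | O_RDWR, 0644);
	int i;

	memset(buf, 0xaa, sizeof(buf));
	for (i = 0; i < 32; i++)		/* 1) 128Kb of data */
		write(fd, buf, sizeof(buf));
	ftruncate(fd, 64 * 1024);		/* 2) truncate to 64Kb */
	ftruncate(fd, 256 * 1024);		/* 3) truncate to 256Kb */
	fsync(fd);				/* 4) fsync */
	/*
	 * 5) <power failure>  6) after log replay the trailing 192Kb must
	 * read back as a hole (zeroes), not as stale pre-truncate data.
	 */
	close(fd);
	return 0;
}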
4499 * When we are logging a new inode X, check whether it has a reference that
4500 * matches a reference from some other inode Y created in a past transaction
4501 * and renamed in the current transaction. If we don't do this, then at log
4502 * replay time we can lose inode Y (and all its files if it's a directory):
4504 * mkdir /mnt/x
4505 * echo "hello world" > /mnt/x/foobar
4506 * sync
4507 * mv /mnt/x /mnt/y
4508 * mkdir /mnt/x # or touch /mnt/x
4509 * xfs_io -c fsync /mnt/x
4510 * <power fail>
4511 * mount fs, trigger log replay
4513 * After the log replay procedure, we would lose the first directory and all its
4514 * files (file foobar).
4515 * For the case where inode Y is not a directory we simply end up losing it:
4517 * echo "123" > /mnt/foo
4518 * sync
4519 * mv /mnt/foo /mnt/bar
4520 * echo "abc" > /mnt/foo
4521 * xfs_io -c fsync /mnt/foo
4522 * <power fail>
4524 * We also need this for cases where a snapshot entry is replaced by some other
4525 * entry (file or directory) otherwise we end up with an unreplayable log due to
4526 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4527 * if it were a regular entry:
4529 * mkdir /mnt/x
4530 * btrfs subvolume snapshot /mnt /mnt/x/snap
4531 * btrfs subvolume delete /mnt/x/snap
4532 * rmdir /mnt/x
4533 * mkdir /mnt/x
4534 * fsync /mnt/x or fsync some new file inside it
4535 * <power fail>
4537 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4538 * the same transaction. (A userspace sketch of the first scenario follows this function.)
4540 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4541 const int slot,
4542 const struct btrfs_key *key,
4543 struct btrfs_inode *inode,
4544 u64 *other_ino)
4546 int ret;
4547 struct btrfs_path *search_path;
4548 char *name = NULL;
4549 u32 name_len = 0;
4550 u32 item_size = btrfs_item_size_nr(eb, slot);
4551 u32 cur_offset = 0;
4552 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4554 search_path = btrfs_alloc_path();
4555 if (!search_path)
4556 return -ENOMEM;
4557 search_path->search_commit_root = 1;
4558 search_path->skip_locking = 1;
4560 while (cur_offset < item_size) {
4561 u64 parent;
4562 u32 this_name_len;
4563 u32 this_len;
4564 unsigned long name_ptr;
4565 struct btrfs_dir_item *di;
4567 if (key->type == BTRFS_INODE_REF_KEY) {
4568 struct btrfs_inode_ref *iref;
4570 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4571 parent = key->offset;
4572 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4573 name_ptr = (unsigned long)(iref + 1);
4574 this_len = sizeof(*iref) + this_name_len;
4575 } else {
4576 struct btrfs_inode_extref *extref;
4578 extref = (struct btrfs_inode_extref *)(ptr +
4579 cur_offset);
4580 parent = btrfs_inode_extref_parent(eb, extref);
4581 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4582 name_ptr = (unsigned long)&extref->name;
4583 this_len = sizeof(*extref) + this_name_len;
4586 ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
4587 this_name_len);
4588 if (!ret) {
4589 ret = -EIO;
4590 goto out;
4592 if (this_name_len > name_len) {
4593 char *new_name;
4595 new_name = krealloc(name, this_name_len, GFP_NOFS);
4596 if (!new_name) {
4597 ret = -ENOMEM;
4598 goto out;
4600 name_len = this_name_len;
4601 name = new_name;
4604 read_extent_buffer(eb, name, name_ptr, this_name_len);
4605 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4606 parent, name, this_name_len, 0);
4607 if (di && !IS_ERR(di)) {
4608 struct btrfs_key di_key;
4610 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4611 di, &di_key);
4612 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4613 ret = 1;
4614 *other_ino = di_key.objectid;
4615 } else {
4616 ret = -EAGAIN;
4618 goto out;
4619 } else if (IS_ERR(di)) {
4620 ret = PTR_ERR(di);
4621 goto out;
4623 btrfs_release_path(search_path);
4625 cur_offset += this_len;
4627 ret = 0;
4628 out:
4629 btrfs_free_path(search_path);
4630 kfree(name);
4631 return ret;
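/*
 * Editor's note: userspace sketch of the first scenario from the comment
 * before this function (mv /mnt/x /mnt/y, recreate x, fsync x); illustrative
 * only, paths relative to a btrfs mount, no error handling.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	mkdir("x", 0755);
	/* ... create and fsync x/foobar, then: */
	sync();
	rename("x", "y");			/* old name moves away ... */
	mkdir("x", 0755);			/* ... and gets recreated */
	fd = open("x", O_RDONLY | O_DIRECTORY);
	fsync(fd);	/* must not lose directory y (the old x) on replay */
	close(fd);
	return 0;
}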
4634 /* log a single inode in the tree log.
4635 * At least one parent directory for this inode must exist in the tree
4636 * or be logged already.
4638 * Any items from this inode changed by the current transaction are copied
4639 * to the log tree. An extra reference is taken on any extents in this
4640 * file, allowing us to avoid a whole pile of corner cases around logging
4641 * blocks that have been removed from the tree.
4643 * See LOG_INODE_ALL and related defines for a description of what inode_only
4644 * does.
4646 * This handles both files and directories.
4648 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4649 struct btrfs_root *root, struct btrfs_inode *inode,
4650 int inode_only,
4651 const loff_t start,
4652 const loff_t end,
4653 struct btrfs_log_ctx *ctx)
4655 struct btrfs_fs_info *fs_info = root->fs_info;
4656 struct btrfs_path *path;
4657 struct btrfs_path *dst_path;
4658 struct btrfs_key min_key;
4659 struct btrfs_key max_key;
4660 struct btrfs_root *log = root->log_root;
4661 LIST_HEAD(logged_list);
4662 u64 last_extent = 0;
4663 int err = 0;
4664 int ret;
4665 int nritems;
4666 int ins_start_slot = 0;
4667 int ins_nr;
4668 bool fast_search = false;
4669 u64 ino = btrfs_ino(inode);
4670 struct extent_map_tree *em_tree = &inode->extent_tree;
4671 u64 logged_isize = 0;
4672 bool need_log_inode_item = true;
4674 path = btrfs_alloc_path();
4675 if (!path)
4676 return -ENOMEM;
4677 dst_path = btrfs_alloc_path();
4678 if (!dst_path) {
4679 btrfs_free_path(path);
4680 return -ENOMEM;
4683 min_key.objectid = ino;
4684 min_key.type = BTRFS_INODE_ITEM_KEY;
4685 min_key.offset = 0;
4687 max_key.objectid = ino;
4690 /* today the code can only do partial logging of directories */
4691 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4692 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4693 &inode->runtime_flags) &&
4694 inode_only >= LOG_INODE_EXISTS))
4695 max_key.type = BTRFS_XATTR_ITEM_KEY;
4696 else
4697 max_key.type = (u8)-1;
4698 max_key.offset = (u64)-1;
4701 * Only run delayed items if we are a dir or a new file.
4702 * Otherwise commit the delayed inode only, which is needed in
4703 * order for the log replay code to mark inodes for link count
4704 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4706 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4707 inode->generation > fs_info->last_trans_committed)
4708 ret = btrfs_commit_inode_delayed_items(trans, inode);
4709 else
4710 ret = btrfs_commit_inode_delayed_inode(inode);
4712 if (ret) {
4713 btrfs_free_path(path);
4714 btrfs_free_path(dst_path);
4715 return ret;
4718 if (inode_only == LOG_OTHER_INODE) {
4719 inode_only = LOG_INODE_EXISTS;
4720 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
4721 } else {
4722 mutex_lock(&inode->log_mutex);
4726 * a brute force approach to making sure we get the most up-to-date
4727 * copies of everything.
4729 if (S_ISDIR(inode->vfs_inode.i_mode)) {
4730 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4732 if (inode_only == LOG_INODE_EXISTS)
4733 max_key_type = BTRFS_XATTR_ITEM_KEY;
4734 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4735 } else {
4736 if (inode_only == LOG_INODE_EXISTS) {
4738 * Make sure the new inode item we write to the log has
4739 * the same isize as the current one (if it exists).
4740 * This is necessary to prevent data loss after log
4741 * replay, and also to prevent doing a wrong expanding
4742 * truncate - e.g. create file, write 4K into offset
4743 * 0, fsync, write 4K into offset 4096, add hard link,
4744 * fsync some other file (to sync log), power fail - if
4745 * we use the inode's current i_size, after log replay
4746 * we get an 8Kb file, with the last 4Kb extent as a hole
4747 * (zeroes), as if an expanding truncate happened,
4748 * instead of getting a file of 4Kb only.
4750 err = logged_inode_size(log, inode, path, &logged_isize);
4751 if (err)
4752 goto out_unlock;
4754 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4755 &inode->runtime_flags)) {
4756 if (inode_only == LOG_INODE_EXISTS) {
4757 max_key.type = BTRFS_XATTR_ITEM_KEY;
4758 ret = drop_objectid_items(trans, log, path, ino,
4759 max_key.type);
4760 } else {
4761 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4762 &inode->runtime_flags);
4763 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4764 &inode->runtime_flags);
4765 while (1) {
4766 ret = btrfs_truncate_inode_items(trans,
4767 log, &inode->vfs_inode, 0, 0);
4768 if (ret != -EAGAIN)
4769 break;
4772 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4773 &inode->runtime_flags) ||
4774 inode_only == LOG_INODE_EXISTS) {
4775 if (inode_only == LOG_INODE_ALL)
4776 fast_search = true;
4777 max_key.type = BTRFS_XATTR_ITEM_KEY;
4778 ret = drop_objectid_items(trans, log, path, ino,
4779 max_key.type);
4780 } else {
4781 if (inode_only == LOG_INODE_ALL)
4782 fast_search = true;
4783 goto log_extents;
4787 if (ret) {
4788 err = ret;
4789 goto out_unlock;
4792 while (1) {
4793 ins_nr = 0;
4794 ret = btrfs_search_forward(root, &min_key,
4795 path, trans->transid);
4796 if (ret < 0) {
4797 err = ret;
4798 goto out_unlock;
4800 if (ret != 0)
4801 break;
4802 again:
4803 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4804 if (min_key.objectid != ino)
4805 break;
4806 if (min_key.type > max_key.type)
4807 break;
4809 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4810 need_log_inode_item = false;
4812 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4813 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4814 inode->generation == trans->transid) {
4815 u64 other_ino = 0;
4817 ret = btrfs_check_ref_name_override(path->nodes[0],
4818 path->slots[0], &min_key, inode,
4819 &other_ino);
4820 if (ret < 0) {
4821 err = ret;
4822 goto out_unlock;
4823 } else if (ret > 0 && ctx &&
4824 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
4825 struct btrfs_key inode_key;
4826 struct inode *other_inode;
4828 if (ins_nr > 0) {
4829 ins_nr++;
4830 } else {
4831 ins_nr = 1;
4832 ins_start_slot = path->slots[0];
4834 ret = copy_items(trans, inode, dst_path, path,
4835 &last_extent, ins_start_slot,
4836 ins_nr, inode_only,
4837 logged_isize);
4838 if (ret < 0) {
4839 err = ret;
4840 goto out_unlock;
4842 ins_nr = 0;
4843 btrfs_release_path(path);
4844 inode_key.objectid = other_ino;
4845 inode_key.type = BTRFS_INODE_ITEM_KEY;
4846 inode_key.offset = 0;
4847 other_inode = btrfs_iget(fs_info->sb,
4848 &inode_key, root,
4849 NULL);
4851 * If the other inode that had a conflicting dir
4852 * entry was deleted in the current transaction,
4853 * we don't need to do more work nor fall back to
4854 * a transaction commit.
4856 if (IS_ERR(other_inode) &&
4857 PTR_ERR(other_inode) == -ENOENT) {
4858 goto next_key;
4859 } else if (IS_ERR(other_inode)) {
4860 err = PTR_ERR(other_inode);
4861 goto out_unlock;
4864 * We are safe logging the other inode without
4865 * acquiring its i_mutex as long as we log with
4866 * the LOG_INODE_EXISTS mode. We're safe against
4867 * concurrent renames of the other inode as well
4868 * because during a rename we pin the log and
4869 * update the log with the new name before we
4870 * unpin it.
4872 err = btrfs_log_inode(trans, root,
4873 BTRFS_I(other_inode),
4874 LOG_OTHER_INODE, 0, LLONG_MAX,
4875 ctx);
4876 iput(other_inode);
4877 if (err)
4878 goto out_unlock;
4879 else
4880 goto next_key;
4884 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4885 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4886 if (ins_nr == 0)
4887 goto next_slot;
4888 ret = copy_items(trans, inode, dst_path, path,
4889 &last_extent, ins_start_slot,
4890 ins_nr, inode_only, logged_isize);
4891 if (ret < 0) {
4892 err = ret;
4893 goto out_unlock;
4895 ins_nr = 0;
4896 if (ret) {
4897 btrfs_release_path(path);
4898 continue;
4900 goto next_slot;
4903 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4904 ins_nr++;
4905 goto next_slot;
4906 } else if (!ins_nr) {
4907 ins_start_slot = path->slots[0];
4908 ins_nr = 1;
4909 goto next_slot;
4912 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4913 ins_start_slot, ins_nr, inode_only,
4914 logged_isize);
4915 if (ret < 0) {
4916 err = ret;
4917 goto out_unlock;
4919 if (ret) {
4920 ins_nr = 0;
4921 btrfs_release_path(path);
4922 continue;
4924 ins_nr = 1;
4925 ins_start_slot = path->slots[0];
4926 next_slot:
4928 nritems = btrfs_header_nritems(path->nodes[0]);
4929 path->slots[0]++;
4930 if (path->slots[0] < nritems) {
4931 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4932 path->slots[0]);
4933 goto again;
4935 if (ins_nr) {
4936 ret = copy_items(trans, inode, dst_path, path,
4937 &last_extent, ins_start_slot,
4938 ins_nr, inode_only, logged_isize);
4939 if (ret < 0) {
4940 err = ret;
4941 goto out_unlock;
4943 ret = 0;
4944 ins_nr = 0;
4946 btrfs_release_path(path);
4947 next_key:
4948 if (min_key.offset < (u64)-1) {
4949 min_key.offset++;
4950 } else if (min_key.type < max_key.type) {
4951 min_key.type++;
4952 min_key.offset = 0;
4953 } else {
4954 break;
4957 if (ins_nr) {
4958 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4959 ins_start_slot, ins_nr, inode_only,
4960 logged_isize);
4961 if (ret < 0) {
4962 err = ret;
4963 goto out_unlock;
4965 ret = 0;
4966 ins_nr = 0;
4969 btrfs_release_path(path);
4970 btrfs_release_path(dst_path);
4971 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4972 if (err)
4973 goto out_unlock;
4974 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4975 btrfs_release_path(path);
4976 btrfs_release_path(dst_path);
4977 err = btrfs_log_trailing_hole(trans, root, inode, path);
4978 if (err)
4979 goto out_unlock;
4981 log_extents:
4982 btrfs_release_path(path);
4983 btrfs_release_path(dst_path);
4984 if (need_log_inode_item) {
4985 err = log_inode_item(trans, log, dst_path, inode);
4986 if (err)
4987 goto out_unlock;
4989 if (fast_search) {
4990 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4991 &logged_list, ctx, start, end);
4992 if (ret) {
4993 err = ret;
4994 goto out_unlock;
4996 } else if (inode_only == LOG_INODE_ALL) {
4997 struct extent_map *em, *n;
4999 write_lock(&em_tree->lock);
5001 * We can't just remove every em if we're called for a ranged
5002 * fsync - that is, one that doesn't cover the whole possible
5003 * file range (0 to LLONG_MAX). This is because we can have
5004 * em's that fall outside the range we're logging and therefore
5005 * their ordered operations haven't completed yet
5006 * (btrfs_finish_ordered_io() not invoked yet). This means we
5007 * didn't get their respective file extent item in the fs/subvol
5008 * tree yet, and need to let the next fast fsync (one which
5009 * consults the list of modified extent maps) find the em so
5010 * that it logs a matching file extent item and waits for the
5011 * respective ordered operation to complete (if it's still
5012 * running).
5014 * Removing every em outside the range we're logging would make
5015 * the next fast fsync not log their matching file extent items,
5016 * therefore making us lose data after a log replay.
5018 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5019 list) {
5020 const u64 mod_end = em->mod_start + em->mod_len - 1;
5022 if (em->mod_start >= start && mod_end <= end)
5023 list_del_init(&em->list);
5025 write_unlock(&em_tree->lock);
5028 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5029 ret = log_directory_changes(trans, root, inode, path, dst_path,
5030 ctx);
5031 if (ret) {
5032 err = ret;
5033 goto out_unlock;
5037 spin_lock(&inode->lock);
5038 inode->logged_trans = trans->transid;
5039 inode->last_log_commit = inode->last_sub_trans;
5040 spin_unlock(&inode->lock);
5041 out_unlock:
5042 if (unlikely(err))
5043 btrfs_put_logged_extents(&logged_list);
5044 else
5045 btrfs_submit_logged_extents(&logged_list, log);
5046 mutex_unlock(&inode->log_mutex);
5048 btrfs_free_path(path);
5049 btrfs_free_path(dst_path);
5050 return err;
5054 * Check if we must fallback to a transaction commit when logging an inode.
5055 * This must be called after logging the inode and is used only in the context
5056 * where fsyncing an inode requires logging some other inode - in which case we
5057 * can't lock the i_mutex of each other inode we need to log, as that can lead
5058 * to deadlocks with concurrent fsyncs against other inodes (as we can log
5059 * inodes up or down in the hierarchy) or with rename operations, for example.
5060 * So we take the log_mutex of the inode after we have logged it and then check
5061 * its last_unlink_trans value - this is safe because any task setting
5062 * last_unlink_trans must take the log_mutex and must do so before the actual
5063 * unlink operation. So if we do this check before a concurrent task sets
5064 * last_unlink_trans, it means we've logged a consistent version/state of all
5065 * the inode's items; otherwise we are not sure and must do a transaction
5066 * commit (the concurrent task might have only updated last_unlink_trans before
5067 * we logged the inode, or it might have also done the unlink).
5069 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5070 struct btrfs_inode *inode)
5072 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5073 bool ret = false;
5075 mutex_lock(&inode->log_mutex);
5076 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5078 * Make sure any commits to the log are forced to be full
5079 * commits.
5081 btrfs_set_log_full_commit(fs_info, trans);
5082 ret = true;
5084 mutex_unlock(&inode->log_mutex);
5086 return ret;
5090 * follow the dentry parent pointers up the chain and see if any
5091 * of the directories in it require a full commit before they can
5092 * be logged. Returns zero if nothing special needs to be done or 1 if
5093 * a full commit is required.
5095 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5096 struct btrfs_inode *inode,
5097 struct dentry *parent,
5098 struct super_block *sb,
5099 u64 last_committed)
5101 int ret = 0;
5102 struct dentry *old_parent = NULL;
5103 struct btrfs_inode *orig_inode = inode;
5106 * for regular files, if the inode is already on disk, we don't
5107 * have to worry about the parents at all. This is because
5108 * we can use the last_unlink_trans field to record renames
5109 * and other fun in this file.
5111 if (S_ISREG(inode->vfs_inode.i_mode) &&
5112 inode->generation <= last_committed &&
5113 inode->last_unlink_trans <= last_committed)
5114 goto out;
5116 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5117 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5118 goto out;
5119 inode = BTRFS_I(d_inode(parent));
5122 while (1) {
5124 * If we are logging a directory then we start with our inode,
5125 * not our parent's inode, so we need to skip setting the
5126 * logged_trans so that further down in the log code we don't
5127 * think this inode has already been logged.
5129 if (inode != orig_inode)
5130 inode->logged_trans = trans->transid;
5131 smp_mb();
5133 if (btrfs_must_commit_transaction(trans, inode)) {
5134 ret = 1;
5135 break;
5138 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5139 break;
5141 if (IS_ROOT(parent)) {
5142 inode = BTRFS_I(d_inode(parent));
5143 if (btrfs_must_commit_transaction(trans, inode))
5144 ret = 1;
5145 break;
5148 parent = dget_parent(parent);
5149 dput(old_parent);
5150 old_parent = parent;
5151 inode = BTRFS_I(d_inode(parent));
5154 dput(old_parent);
5155 out:
5156 return ret;
5159 struct btrfs_dir_list {
5160 u64 ino;
5161 struct list_head list;
5165 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5166 * details about why it is needed.
5167 * This is a recursive operation - if an existing dentry corresponds to a
5168 * directory, that directory's new entries are logged too (same behaviour as
5169 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5170 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5171 * complains about the following circular lock dependency / possible deadlock:
5173 * CPU0 CPU1
5174 * ---- ----
5175 * lock(&type->i_mutex_dir_key#3/2);
5176 * lock(sb_internal#2);
5177 * lock(&type->i_mutex_dir_key#3/2);
5178 * lock(&sb->s_type->i_mutex_key#14);
5180 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5181 * sb_start_intwrite() in btrfs_start_transaction().
5182 * Not locking i_mutex of the inodes is still safe because:
5184 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5185 * that while logging the inode new references (names) are added or removed
5186 * from the inode, leaving the logged inode item with a link count that does
5187 * not match the number of logged inode reference items. This is fine because
5188 * at log replay time we compute the real number of links and correct the
5189 * link count in the inode item (see replay_one_buffer() and
5190 * link_to_fixup_dir());
5192 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5193 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5194 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5195 * has a size that doesn't match the sum of the lengths of all the logged
5196 * names. This does not result in a problem because if a dir_item key is
5197 * logged but its matching dir_index key is not logged, at log replay time we
5198 * don't use it to replay the respective name (see replay_one_name()). On the
5199 * other hand if only the dir_index key ends up being logged, the respective
5200 * name is added to the fs/subvol tree with both the dir_item and dir_index
5201 * keys created (see replay_one_name()).
5202 * The directory's inode item with a wrong i_size is not a problem as well,
5203 * since we don't use it at log replay time to set the i_size in the inode
5204 * item of the fs/subvol tree (see overwrite_item()).
5206 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5207 struct btrfs_root *root,
5208 struct btrfs_inode *start_inode,
5209 struct btrfs_log_ctx *ctx)
5211 struct btrfs_fs_info *fs_info = root->fs_info;
5212 struct btrfs_root *log = root->log_root;
5213 struct btrfs_path *path;
5214 LIST_HEAD(dir_list);
5215 struct btrfs_dir_list *dir_elem;
5216 int ret = 0;
5218 path = btrfs_alloc_path();
5219 if (!path)
5220 return -ENOMEM;
5222 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5223 if (!dir_elem) {
5224 btrfs_free_path(path);
5225 return -ENOMEM;
5227 dir_elem->ino = btrfs_ino(start_inode);
5228 list_add_tail(&dir_elem->list, &dir_list);
5230 while (!list_empty(&dir_list)) {
5231 struct extent_buffer *leaf;
5232 struct btrfs_key min_key;
5233 int nritems;
5234 int i;
5236 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5237 list);
5238 if (ret)
5239 goto next_dir_inode;
5241 min_key.objectid = dir_elem->ino;
5242 min_key.type = BTRFS_DIR_ITEM_KEY;
5243 min_key.offset = 0;
5244 again:
5245 btrfs_release_path(path);
5246 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5247 if (ret < 0) {
5248 goto next_dir_inode;
5249 } else if (ret > 0) {
5250 ret = 0;
5251 goto next_dir_inode;
5254 process_leaf:
5255 leaf = path->nodes[0];
5256 nritems = btrfs_header_nritems(leaf);
5257 for (i = path->slots[0]; i < nritems; i++) {
5258 struct btrfs_dir_item *di;
5259 struct btrfs_key di_key;
5260 struct inode *di_inode;
5261 struct btrfs_dir_list *new_dir_elem;
5262 int log_mode = LOG_INODE_EXISTS;
5263 int type;
5265 btrfs_item_key_to_cpu(leaf, &min_key, i);
5266 if (min_key.objectid != dir_elem->ino ||
5267 min_key.type != BTRFS_DIR_ITEM_KEY)
5268 goto next_dir_inode;
5270 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5271 type = btrfs_dir_type(leaf, di);
5272 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5273 type != BTRFS_FT_DIR)
5274 continue;
5275 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5276 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5277 continue;
5279 btrfs_release_path(path);
5280 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5281 if (IS_ERR(di_inode)) {
5282 ret = PTR_ERR(di_inode);
5283 goto next_dir_inode;
5286 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5287 iput(di_inode);
5288 break;
5291 ctx->log_new_dentries = false;
5292 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5293 log_mode = LOG_INODE_ALL;
5294 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5295 log_mode, 0, LLONG_MAX, ctx);
5296 if (!ret &&
5297 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5298 ret = 1;
5299 iput(di_inode);
5300 if (ret)
5301 goto next_dir_inode;
5302 if (ctx->log_new_dentries) {
5303 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5304 GFP_NOFS);
5305 if (!new_dir_elem) {
5306 ret = -ENOMEM;
5307 goto next_dir_inode;
5309 new_dir_elem->ino = di_key.objectid;
5310 list_add_tail(&new_dir_elem->list, &dir_list);
5312 break;
5314 if (i == nritems) {
5315 ret = btrfs_next_leaf(log, path);
5316 if (ret < 0) {
5317 goto next_dir_inode;
5318 } else if (ret > 0) {
5319 ret = 0;
5320 goto next_dir_inode;
5322 goto process_leaf;
5324 if (min_key.offset < (u64)-1) {
5325 min_key.offset++;
5326 goto again;
5328 next_dir_inode:
5329 list_del(&dir_elem->list);
5330 kfree(dir_elem);
5333 btrfs_free_path(path);
5334 return ret;
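/*
 * Editor's note: the walk above is conceptually recursive but implemented as
 * an iterative FIFO work list (struct btrfs_dir_list), which avoids unbounded
 * stack depth in the kernel.  Minimal userspace sketch of the same pattern,
 * with illustrative names:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t queue[64];
	int head = 0, tail = 0;

	queue[tail++] = 256;			/* the starting directory */
	while (head < tail) {
		uint64_t ino = queue[head++];

		printf("log dir %llu\n", (unsigned long long)ino);
		/*
		 * For each new dentry of 'ino' that is itself a directory,
		 * append it for later processing: queue[tail++] = child_ino;
		 */
	}
	return 0;
}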
5337 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5338 struct btrfs_inode *inode,
5339 struct btrfs_log_ctx *ctx)
5341 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5342 int ret;
5343 struct btrfs_path *path;
5344 struct btrfs_key key;
5345 struct btrfs_root *root = inode->root;
5346 const u64 ino = btrfs_ino(inode);
5348 path = btrfs_alloc_path();
5349 if (!path)
5350 return -ENOMEM;
5351 path->skip_locking = 1;
5352 path->search_commit_root = 1;
5354 key.objectid = ino;
5355 key.type = BTRFS_INODE_REF_KEY;
5356 key.offset = 0;
5357 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5358 if (ret < 0)
5359 goto out;
5361 while (true) {
5362 struct extent_buffer *leaf = path->nodes[0];
5363 int slot = path->slots[0];
5364 u32 cur_offset = 0;
5365 u32 item_size;
5366 unsigned long ptr;
5368 if (slot >= btrfs_header_nritems(leaf)) {
5369 ret = btrfs_next_leaf(root, path);
5370 if (ret < 0)
5371 goto out;
5372 else if (ret > 0)
5373 break;
5374 continue;
5377 btrfs_item_key_to_cpu(leaf, &key, slot);
5378 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5379 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5380 break;
5382 item_size = btrfs_item_size_nr(leaf, slot);
5383 ptr = btrfs_item_ptr_offset(leaf, slot);
5384 while (cur_offset < item_size) {
5385 struct btrfs_key inode_key;
5386 struct inode *dir_inode;
5388 inode_key.type = BTRFS_INODE_ITEM_KEY;
5389 inode_key.offset = 0;
5391 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5392 struct btrfs_inode_extref *extref;
5394 extref = (struct btrfs_inode_extref *)
5395 (ptr + cur_offset);
5396 inode_key.objectid = btrfs_inode_extref_parent(
5397 leaf, extref);
5398 cur_offset += sizeof(*extref);
5399 cur_offset += btrfs_inode_extref_name_len(leaf,
5400 extref);
5401 } else {
5402 inode_key.objectid = key.offset;
5403 cur_offset = item_size;
5406 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5407 root, NULL);
5408 /* If parent inode was deleted, skip it. */
5409 if (IS_ERR(dir_inode))
5410 continue;
5412 if (ctx)
5413 ctx->log_new_dentries = false;
5414 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5415 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5416 if (!ret &&
5417 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5418 ret = 1;
5419 if (!ret && ctx && ctx->log_new_dentries)
5420 ret = log_new_dir_dentries(trans, root,
5421 BTRFS_I(dir_inode), ctx);
5422 iput(dir_inode);
5423 if (ret)
5424 goto out;
5426 path->slots[0]++;
5428 ret = 0;
5429 out:
5430 btrfs_free_path(path);
5431 return ret;
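/*
 * Editor's note: this function and btrfs_check_ref_name_override() both walk
 * an item that packs variable-length ref records back to back - a fixed
 * header followed by name_len bytes of name - advancing a byte cursor by
 * sizeof(header) + name_len.  Userspace sketch of the same walk; the struct
 * layout is illustrative, not the on-disk btrfs format.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ref_hdr {
	uint64_t parent;
	uint16_t name_len;
} __attribute__((packed));

static void walk_refs(const uint8_t *item, uint32_t item_size)
{
	uint32_t cur = 0;

	while (cur < item_size) {
		const struct ref_hdr *h = (const void *)(item + cur);

		printf("parent=%llu name=%.*s\n",
		       (unsigned long long)h->parent,
		       h->name_len, (const char *)(h + 1));
		cur += sizeof(*h) + h->name_len;	/* next record */
	}
}

int main(void)
{
	uint8_t buf[64];
	struct ref_hdr h = { .parent = 256, .name_len = 3 };

	memcpy(buf, &h, sizeof(h));
	memcpy(buf + sizeof(h), "foo", 3);
	walk_refs(buf, sizeof(h) + 3);
	return 0;
}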
5435 * helper function around btrfs_log_inode to make sure newly created
5436 * parent directories also end up in the log. A minimal inode-and-backref-only
5437 * logging is done for any parent directories that are older than
5438 * the last committed transaction.
5440 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5441 struct btrfs_root *root,
5442 struct btrfs_inode *inode,
5443 struct dentry *parent,
5444 const loff_t start,
5445 const loff_t end,
5446 int exists_only,
5447 struct btrfs_log_ctx *ctx)
5449 struct btrfs_fs_info *fs_info = root->fs_info;
5450 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5451 struct super_block *sb;
5452 struct dentry *old_parent = NULL;
5453 int ret = 0;
5454 u64 last_committed = fs_info->last_trans_committed;
5455 bool log_dentries = false;
5456 struct btrfs_inode *orig_inode = inode;
5458 sb = inode->vfs_inode.i_sb;
5460 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5461 ret = 1;
5462 goto end_no_trans;
5466 * If the previous transaction commit didn't complete, we must
5467 * do a full commit ourselves.
5469 if (fs_info->last_trans_log_full_commit >
5470 fs_info->last_trans_committed) {
5471 ret = 1;
5472 goto end_no_trans;
5475 if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) {
5476 ret = 1;
5477 goto end_no_trans;
5480 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5481 last_committed);
5482 if (ret)
5483 goto end_no_trans;
5485 if (btrfs_inode_in_log(inode, trans->transid)) {
5486 ret = BTRFS_NO_LOG_SYNC;
5487 goto end_no_trans;
5490 ret = start_log_trans(trans, root, ctx);
5491 if (ret)
5492 goto end_no_trans;
5494 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5495 if (ret)
5496 goto end_trans;
5499 * for regular files, if the inode is already on disk, we don't
5500 * have to worry about the parents at all. This is because
5501 * we can use the last_unlink_trans field to record renames
5502 * and other fun in this file.
5504 if (S_ISREG(inode->vfs_inode.i_mode) &&
5505 inode->generation <= last_committed &&
5506 inode->last_unlink_trans <= last_committed) {
5507 ret = 0;
5508 goto end_trans;
5511 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5512 log_dentries = true;
5515 * On unlink we must make sure all our current and old parent directory
5516 * inodes are fully logged. This is to prevent leaving dangling
5517 * directory index entries in directories that were our parents but are
5518 * not anymore. Not doing this results in old parent directory being
5519 * impossible to delete after log replay (rmdir will always fail with
5520 * error -ENOTEMPTY).
5522 * Example 1:
5524 * mkdir testdir
5525 * touch testdir/foo
5526 * ln testdir/foo testdir/bar
5527 * sync
5528 * unlink testdir/bar
5529 * xfs_io -c fsync testdir/foo
5530 * <power failure>
5531 * mount fs, triggers log replay
5533 * If we don't log the parent directory (testdir), after log replay the
5534 * directory still has an entry pointing to the file inode using the bar
5535 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5536 * the file inode has a link count of 1.
5538 * Example 2:
5540 * mkdir testdir
5541 * touch foo
5542 * ln foo testdir/foo2
5543 * ln foo testdir/foo3
5544 * sync
5545 * unlink testdir/foo3
5546 * xfs_io -c fsync foo
5547 * <power failure>
5548 * mount fs, triggers log replay
5550 * Similar as the first example, after log replay the parent directory
5551 * testdir still has an entry pointing to the inode file with name foo3
5552 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5553 * and has a link count of 2. (A userspace repro of example 1 follows this function.)
5555 if (inode->last_unlink_trans > last_committed) {
5556 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5557 if (ret)
5558 goto end_trans;
5561 while (1) {
5562 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5563 break;
5565 inode = BTRFS_I(d_inode(parent));
5566 if (root != inode->root)
5567 break;
5569 if (inode->generation > last_committed) {
5570 ret = btrfs_log_inode(trans, root, inode,
5571 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5572 if (ret)
5573 goto end_trans;
5575 if (IS_ROOT(parent))
5576 break;
5578 parent = dget_parent(parent);
5579 dput(old_parent);
5580 old_parent = parent;
5582 if (log_dentries)
5583 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5584 else
5585 ret = 0;
5586 end_trans:
5587 dput(old_parent);
5588 if (ret < 0) {
5589 btrfs_set_log_full_commit(fs_info, trans);
5590 ret = 1;
5593 if (ret)
5594 btrfs_remove_log_ctx(root, ctx);
5595 btrfs_end_log_trans(root);
5596 end_no_trans:
5597 return ret;
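/*
 * Editor's note: userspace repro sketch of example 1 from the comment above
 * (hard link removed, remaining name fsynced); illustrative only, no error
 * handling.
 */
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd;

	mkdir("testdir", 0755);
	fd = open("testdir/foo", O_CREAT | O_RDWR, 0644);
	link("testdir/foo", "testdir/bar");
	sync();
	unlink("testdir/bar");
	fsync(fd);	/*
			 * testdir must be logged too, or after replay the
			 * stale 'bar' entry leaves testdir undeletable.
			 */
	close(fd);
	return 0;
}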
5601 * it is not safe to log a dentry if the chunk root has added new
5602 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5603 * If this returns 1, you must commit the transaction to safely get your
5604 * data on disk.
5606 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5607 struct btrfs_root *root, struct dentry *dentry,
5608 const loff_t start,
5609 const loff_t end,
5610 struct btrfs_log_ctx *ctx)
5612 struct dentry *parent = dget_parent(dentry);
5613 int ret;
5615 ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
5616 parent, start, end, 0, ctx);
5617 dput(parent);
5619 return ret;
5623 * should be called during mount to recover and replay any log trees
5624 * from the FS
5626 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5628 int ret;
5629 struct btrfs_path *path;
5630 struct btrfs_trans_handle *trans;
5631 struct btrfs_key key;
5632 struct btrfs_key found_key;
5633 struct btrfs_key tmp_key;
5634 struct btrfs_root *log;
5635 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5636 struct walk_control wc = {
5637 .process_func = process_one_buffer,
5638 .stage = 0,
5641 path = btrfs_alloc_path();
5642 if (!path)
5643 return -ENOMEM;
5645 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5647 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5648 if (IS_ERR(trans)) {
5649 ret = PTR_ERR(trans);
5650 goto error;
5653 wc.trans = trans;
5654 wc.pin = 1;
5656 ret = walk_log_tree(trans, log_root_tree, &wc);
5657 if (ret) {
5658 btrfs_handle_fs_error(fs_info, ret,
5659 "Failed to pin buffers while recovering log root tree.");
5660 goto error;
5663 again:
5664 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5665 key.offset = (u64)-1;
5666 key.type = BTRFS_ROOT_ITEM_KEY;
5668 while (1) {
5669 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5671 if (ret < 0) {
5672 btrfs_handle_fs_error(fs_info, ret,
5673 "Couldn't find tree log root.");
5674 goto error;
5676 if (ret > 0) {
5677 if (path->slots[0] == 0)
5678 break;
5679 path->slots[0]--;
5681 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5682 path->slots[0]);
5683 btrfs_release_path(path);
5684 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5685 break;
5687 log = btrfs_read_fs_root(log_root_tree, &found_key);
5688 if (IS_ERR(log)) {
5689 ret = PTR_ERR(log);
5690 btrfs_handle_fs_error(fs_info, ret,
5691 "Couldn't read tree log root.");
5692 goto error;
5695 tmp_key.objectid = found_key.offset;
5696 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5697 tmp_key.offset = (u64)-1;
5699 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5700 if (IS_ERR(wc.replay_dest)) {
5701 ret = PTR_ERR(wc.replay_dest);
5702 free_extent_buffer(log->node);
5703 free_extent_buffer(log->commit_root);
5704 kfree(log);
5705 btrfs_handle_fs_error(fs_info, ret,
5706 "Couldn't read target root for tree log recovery.");
5707 goto error;
5710 wc.replay_dest->log_root = log;
5711 btrfs_record_root_in_trans(trans, wc.replay_dest);
5712 ret = walk_log_tree(trans, log, &wc);
5714 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5715 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5716 path);
5719 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5720 struct btrfs_root *root = wc.replay_dest;
5722 btrfs_release_path(path);
5725 * We have just replayed everything, and the highest
5726 * objectid of fs roots probably has changed in case
5727 * some inode items got replayed.
5729 * root->objectid_mutex is not acquired as log replay
5730 * could only happen during mount.
5732 ret = btrfs_find_highest_objectid(root,
5733 &root->highest_objectid);
5736 key.offset = found_key.offset - 1;
5737 wc.replay_dest->log_root = NULL;
5738 free_extent_buffer(log->node);
5739 free_extent_buffer(log->commit_root);
5740 kfree(log);
5742 if (ret)
5743 goto error;
5745 if (found_key.offset == 0)
5746 break;
5748 btrfs_release_path(path);
5750 /* step one is to pin it all, step two is to replay just inodes */
5751 if (wc.pin) {
5752 wc.pin = 0;
5753 wc.process_func = replay_one_buffer;
5754 wc.stage = LOG_WALK_REPLAY_INODES;
5755 goto again;
5757 /* step three is to replay everything */
5758 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5759 wc.stage++;
5760 goto again;
5763 btrfs_free_path(path);
5765 /* step 4: commit the transaction, which also unpins the blocks */
5766 ret = btrfs_commit_transaction(trans);
5767 if (ret)
5768 return ret;
5770 free_extent_buffer(log_root_tree->node);
5771 log_root_tree->log_root = NULL;
5772 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5773 kfree(log_root_tree);
5775 return 0;
5776 error:
5777 if (wc.trans)
5778 btrfs_end_transaction(wc.trans);
5779 btrfs_free_path(path);
5780 return ret;
5784 * there are some corner cases where we want to force a full
5785 * commit instead of allowing a directory to be logged.
5787 * They revolve around files that were unlinked from the directory, and
5788 * this function updates the parent directory so that a full commit is
5789 * properly done if it is fsync'd later after the unlinks are done.
5791 * Must be called before the unlink operations (updates to the subvolume tree,
5792 * inodes, etc) are done.
5794 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5795 struct btrfs_inode *dir, struct btrfs_inode *inode,
5796 int for_rename)
5799 * when we're logging a file, if it hasn't been renamed
5800 * or unlinked, and its inode is fully committed on disk,
5801 * we don't have to worry about walking up the directory chain
5802 * to log its parents.
5804 * So, we use the last_unlink_trans field to put this transid
5805 * into the file. When the file is logged we check it and
5806 * don't log the parents if the file is fully on disk.
5808 mutex_lock(&inode->log_mutex);
5809 inode->last_unlink_trans = trans->transid;
5810 mutex_unlock(&inode->log_mutex);
5813 * if this directory was already logged, any new
5814 * names for this file/dir will get recorded
5816 smp_mb();
5817 if (dir->logged_trans == trans->transid)
5818 return;
5821 * if the inode we're about to unlink was logged,
5822 * the log will be properly updated for any new names
5824 if (inode->logged_trans == trans->transid)
5825 return;
5828 * when renaming files across directories, if the directory
5829 * we're unlinking from gets fsync'd later on, there's
5830 * no way to find the destination directory later and fsync it
5831 * properly. So, we have to be conservative and force commits
5832 * so the new name gets discovered.
5834 if (for_rename)
5835 goto record;
5837 /* we can safely do the unlink without any special recording */
5838 return;
5840 record:
5841 mutex_lock(&dir->log_mutex);
5842 dir->last_unlink_trans = trans->transid;
5843 mutex_unlock(&dir->log_mutex);
5847 * Make sure that if someone attempts to fsync the parent directory of a deleted
5848 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5849 * that after replaying the log tree of the parent directory's root we will not
5850 * see the snapshot anymore and at log replay time we will not see any log tree
5851 * corresponding to the deleted snapshot's root, which could lead to replaying
5852 * it after replaying the log tree of the parent directory (which would replay
5853 * the snapshot delete operation).
5855 * Must be called before the actual snapshot destroy operation (updates to the
5856 * parent root and tree of tree roots trees, etc) are done.
5858 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5859 struct btrfs_inode *dir)
5861 mutex_lock(&dir->log_mutex);
5862 dir->last_unlink_trans = trans->transid;
5863 mutex_unlock(&dir->log_mutex);
5867 * Call this after adding a new name for a file and it will properly
5868 * update the log to reflect the new name.
5870 * It will return zero if all goes well, and it will return 1 if a
5871 * full transaction commit is required.
5873 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5874 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
5875 struct dentry *parent)
5877 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
5878 struct btrfs_root *root = inode->root;
5881 * this will force the logging code to walk the dentry chain
5882 * up for the file
5884 if (S_ISREG(inode->vfs_inode.i_mode))
5885 inode->last_unlink_trans = trans->transid;
5888 * if this inode hasn't been logged and the directory we're renaming it
5889 * from hasn't been logged, we don't need to log it
5891 if (inode->logged_trans <= fs_info->last_trans_committed &&
5892 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
5893 return 0;
5895 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5896 LLONG_MAX, 1, NULL);
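/*
 * Editor's note: userspace sketch of the new-name case this function covers -
 * an inode already in the log gains a hard link in the current transaction,
 * and an fsync must persist the new name; illustrative only, no error
 * handling.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("foo", O_CREAT | O_RDWR, 0644);

	fsync(fd);		/* foo is now in the log */
	link("foo", "bar");	/* new name added in this transaction */
	fsync(fd);		/* the log must also reflect 'bar' */
	close(fd);
	return 0;
}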