fs/btrfs/tree-log.c
1 /*
2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
23 #include "tree-log.h"
24 #include "disk-io.h"
25 #include "locking.h"
26 #include "print-tree.h"
27 #include "backref.h"
28 #include "hash.h"
29 #include "compression.h"
30 #include "qgroup.h"
31 #include "inode-map.h"
33 /* magic values for the inode_only field in btrfs_log_inode:
35 * LOG_INODE_ALL means to log everything
36 * LOG_INODE_EXISTS means to log just enough to recreate the inode
37 * during log replay
39 #define LOG_INODE_ALL 0
40 #define LOG_INODE_EXISTS 1
41 #define LOG_OTHER_INODE 2
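/*
 * Illustrative sketch (simplified, not an exact caller from this file):
 * the inode being fsynced is normally logged with LOG_INODE_ALL, while a
 * parent directory that only has to exist after replay can be logged
 * with LOG_INODE_EXISTS.  The "dir" variable and the condition below are
 * hypothetical; btrfs_log_inode() is declared further down.
 *
 *   ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL,
 *                         0, LLONG_MAX, ctx);
 *   if (!ret && parent_only_needs_to_exist)
 *           ret = btrfs_log_inode(trans, root, dir, LOG_INODE_EXISTS,
 *                                 0, LLONG_MAX, ctx);
 *
 * LOG_OTHER_INODE appears to be used when an inode is logged only as a
 * side effect of logging some other inode (for example a conflicting
 * inode reference), not because it was fsynced itself.
 */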
44 * directory trouble cases
46 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
47 * log, we must force a full commit before doing an fsync of the directory
48 * where the unlink was done.
49 * ---> record transid of last unlink/rename per directory
51 * mkdir foo/some_dir
52 * normal commit
53 * rename foo/some_dir foo2/some_dir
54 * mkdir foo/some_dir
55 * fsync foo/some_dir/some_file
57 * The fsync above will unlink the original some_dir without recording
58 * it in its new location (foo2). After a crash, some_dir will be gone
59 * unless the fsync of some_file forces a full commit
61 * 2) we must log any new names for any file or dir that is in the fsync
62 * log. ---> check inode while renaming/linking.
64 * 2a) we must log any new names for any file or dir during rename
65 * when the directory they are being removed from was logged.
66 * ---> check inode and old parent dir during rename
68 * 2a is actually the more important variant. With the extra logging
69 * a crash might unlink the old name without recreating the new one
71 * 3) after a crash, we must go through any directories with a link count
72 * of zero and redo the rm -rf
74 * mkdir f1/foo
75 * normal commit
76 * rm -rf f1/foo
77 * fsync(f1)
79 * The directory f1 was fully removed from the FS, but fsync was never
80 * called on f1, only its parent dir. After a crash the rm -rf must
81 * be replayed. This must be able to recurse down the entire
82 * directory tree. The inode link count fixup code takes care of the
83 * ugly details.
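/*
 * Illustrative sketch (simplified, not the exact code): case 1 above is
 * implemented by remembering, per directory, the transaction of the
 * last unlink/rename, roughly:
 *
 *   on unlink/rename:  BTRFS_I(dir)->last_unlink_trans = trans->transid;
 *
 *   on fsync of dir:   if that transid is newer than the last committed
 *                      transaction, fall back to a full commit instead
 *                      of a log sync.
 *
 * btrfs_record_unlink_dir() later in this file records the transid; the
 * fsync-side check lives in the directory logging paths.
 */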
87 * stages for the tree walking. The first
88 * stage (0) is to only pin down the blocks we find,
89 * the second stage (1) is to make sure that all the inodes
90 * we find in the log are created in the subvolume.
92 * The last stage is to deal with directories and links and extents
93 * and all the other fun semantics
95 #define LOG_WALK_PIN_ONLY 0
96 #define LOG_WALK_REPLAY_INODES 1
97 #define LOG_WALK_REPLAY_DIR_INDEX 2
98 #define LOG_WALK_REPLAY_ALL 3
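/*
 * Illustrative sketch (a simplification of the real replay code): log
 * replay conceptually walks each log tree once per stage, in order,
 * reusing the same walk_control:
 *
 *   struct walk_control wc = {
 *           .process_func = replay_one_buffer,
 *   };
 *
 *   for (wc.stage = LOG_WALK_PIN_ONLY; wc.stage <= LOG_WALK_REPLAY_ALL;
 *        wc.stage++)
 *           walk_log_tree(trans, log, &wc);
 *
 * walk_log_tree() and replay_one_buffer() are defined later in this
 * file; the actual replay loop also switches process_func and handles
 * multiple log roots, so treat the loop above as pseudocode.
 */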
100 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
101 struct btrfs_root *root, struct inode *inode,
102 int inode_only,
103 const loff_t start,
104 const loff_t end,
105 struct btrfs_log_ctx *ctx);
106 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root,
108 struct btrfs_path *path, u64 objectid);
109 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
110 struct btrfs_root *root,
111 struct btrfs_root *log,
112 struct btrfs_path *path,
113 u64 dirid, int del_all);
116 * tree logging is a special write ahead log used to make sure that
117 * fsyncs and O_SYNCs can happen without doing full tree commits.
119 * Full tree commits are expensive because they require commonly
120 * modified blocks to be recowed, creating many dirty pages in the
121 * extent tree and a 4x-6x higher write load than ext3.
123 * Instead of doing a tree commit on every fsync, we use the
124 * key ranges and transaction ids to find items for a given file or directory
125 * that have changed in this transaction. Those items are copied into
126 * a special tree (one per subvolume root), that tree is written to disk
127 * and then the fsync is considered complete.
129 * After a crash, items are copied out of the log-tree back into the
130 * subvolume tree. Any file data extents found are recorded in the extent
131 * allocation tree, and the log-tree freed.
133 * The log tree is read three times: once to pin down all the extents it is
134 * using in ram, once to create all the inodes logged in the tree
135 * and once to do all the other items.
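/*
 * Illustrative sketch (not an exact call chain): from user space the
 * entry point is a plain fsync; assuming the usual VFS plumbing it
 * reaches this file roughly as:
 *
 *   fsync(fd)
 *     -> btrfs_sync_file()
 *          -> btrfs_log_dentry_safe()   copy changed items into the log
 *          -> btrfs_sync_log()          write the log tree to disk
 *
 * Only when logging is not possible (see btrfs_need_log_full_commit())
 * or fails does btrfs_sync_file() fall back to a full transaction
 * commit.
 */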
139 * start a sub transaction and set up the log tree
140 * this increments the log tree writer count to make the people
141 * syncing the tree wait for us to finish
143 static int start_log_trans(struct btrfs_trans_handle *trans,
144 struct btrfs_root *root,
145 struct btrfs_log_ctx *ctx)
147 int ret = 0;
149 mutex_lock(&root->log_mutex);
151 if (root->log_root) {
152 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
153 ret = -EAGAIN;
154 goto out;
157 if (!root->log_start_pid) {
158 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
159 root->log_start_pid = current->pid;
160 } else if (root->log_start_pid != current->pid) {
161 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
163 } else {
164 mutex_lock(&root->fs_info->tree_log_mutex);
165 if (!root->fs_info->log_root_tree)
166 ret = btrfs_init_log_root_tree(trans, root->fs_info);
167 mutex_unlock(&root->fs_info->tree_log_mutex);
168 if (ret)
169 goto out;
171 ret = btrfs_add_log_tree(trans, root);
172 if (ret)
173 goto out;
175 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
176 root->log_start_pid = current->pid;
179 atomic_inc(&root->log_batch);
180 atomic_inc(&root->log_writers);
181 if (ctx) {
182 int index = root->log_transid % 2;
183 list_add_tail(&ctx->list, &root->log_ctxs[index]);
184 ctx->log_transid = root->log_transid;
187 out:
188 mutex_unlock(&root->log_mutex);
189 return ret;
193 * returns 0 if there was a log transaction running and we were able
194 * to join, or returns -ENOENT if there was no transaction
195 * in progress
197 static int join_running_log_trans(struct btrfs_root *root)
199 int ret = -ENOENT;
201 smp_mb();
202 if (!root->log_root)
203 return -ENOENT;
205 mutex_lock(&root->log_mutex);
206 if (root->log_root) {
207 ret = 0;
208 atomic_inc(&root->log_writers);
210 mutex_unlock(&root->log_mutex);
211 return ret;
215 * This either makes the current running log transaction wait
216 * until you call btrfs_end_log_trans() or it makes any future
217 * log transactions wait until you call btrfs_end_log_trans()
219 int btrfs_pin_log_trans(struct btrfs_root *root)
221 int ret = -ENOENT;
223 mutex_lock(&root->log_mutex);
224 atomic_inc(&root->log_writers);
225 mutex_unlock(&root->log_mutex);
226 return ret;
230 * indicate we're done making changes to the log tree
231 * and wake up anyone waiting to do a sync
233 void btrfs_end_log_trans(struct btrfs_root *root)
235 if (atomic_dec_and_test(&root->log_writers)) {
237 * Implicit memory barrier after atomic_dec_and_test
239 if (waitqueue_active(&root->log_writer_wait))
240 wake_up(&root->log_writer_wait);
246 * the walk control struct is used to pass state down the chain when
247 * processing the log tree. The stage field tells us which part
248 * of the log tree processing we are currently doing. The others
249 * are state fields used for that specific part
251 struct walk_control {
252 /* should we free the extent on disk when done? This is used
253 * at transaction commit time while freeing a log tree
255 int free;
257 /* should we write out the extent buffer? This is used
258 * while flushing the log tree to disk during a sync
260 int write;
262 /* should we wait for the extent buffer io to finish? Also used
263 * while flushing the log tree to disk for a sync
265 int wait;
267 /* pin only walk, we record which extents on disk belong to the
268 * log trees
270 int pin;
272 /* what stage of the replay code we're currently in */
273 int stage;
275 /* the root we are currently replaying */
276 struct btrfs_root *replay_dest;
278 /* the trans handle for the current replay */
279 struct btrfs_trans_handle *trans;
281 /* the function that gets used to process blocks we find in the
282 * tree. Note the extent_buffer might not be up to date when it is
283 * passed in, and it must be checked or read if you need the data
284 * inside it
286 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
287 struct walk_control *wc, u64 gen);
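/*
 * Illustrative examples (hedged, based on how the walkers below are
 * used): when a log tree is freed at transaction commit time the walk
 * frees the blocks, while the first replay pass only pins them:
 *
 *   commit time:   struct walk_control wc = { .free = 1,
 *                          .process_func = process_one_buffer };
 *
 *   replay, pin:   struct walk_control wc = { .pin = 1,
 *                          .process_func = process_one_buffer };
 *
 * The exact combinations set by the real callers may differ slightly;
 * these are only meant to show how the fields relate to each other.
 */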
291 * process_func used to pin down extents, write them or wait on them
293 static int process_one_buffer(struct btrfs_root *log,
294 struct extent_buffer *eb,
295 struct walk_control *wc, u64 gen)
297 int ret = 0;
300 * If this fs is mixed then we need to be able to process the leaves to
301 * pin down any logged extents, so we have to read the block.
303 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
304 ret = btrfs_read_buffer(eb, gen);
305 if (ret)
306 return ret;
309 if (wc->pin)
310 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
311 eb->start, eb->len);
313 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
314 if (wc->pin && btrfs_header_level(eb) == 0)
315 ret = btrfs_exclude_logged_extents(log, eb);
316 if (wc->write)
317 btrfs_write_tree_block(eb);
318 if (wc->wait)
319 btrfs_wait_tree_block_writeback(eb);
321 return ret;
325 * Item overwrite used by replay and tree logging. eb, slot and key all refer
326 * to the src data we are copying out.
328 * root is the tree we are copying into, and path is a scratch
329 * path for use in this function (it should be released on entry and
330 * will be released on exit).
332 * If the key is already in the destination tree the existing item is
333 * overwritten. If the existing item isn't big enough, it is extended.
334 * If it is too large, it is truncated.
336 * If the key isn't in the destination yet, a new item is inserted.
338 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
339 struct btrfs_root *root,
340 struct btrfs_path *path,
341 struct extent_buffer *eb, int slot,
342 struct btrfs_key *key)
344 int ret;
345 u32 item_size;
346 u64 saved_i_size = 0;
347 int save_old_i_size = 0;
348 unsigned long src_ptr;
349 unsigned long dst_ptr;
350 int overwrite_root = 0;
351 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
353 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
354 overwrite_root = 1;
356 item_size = btrfs_item_size_nr(eb, slot);
357 src_ptr = btrfs_item_ptr_offset(eb, slot);
359 /* look for the key in the destination tree */
360 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
361 if (ret < 0)
362 return ret;
364 if (ret == 0) {
365 char *src_copy;
366 char *dst_copy;
367 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
368 path->slots[0]);
369 if (dst_size != item_size)
370 goto insert;
372 if (item_size == 0) {
373 btrfs_release_path(path);
374 return 0;
376 dst_copy = kmalloc(item_size, GFP_NOFS);
377 src_copy = kmalloc(item_size, GFP_NOFS);
378 if (!dst_copy || !src_copy) {
379 btrfs_release_path(path);
380 kfree(dst_copy);
381 kfree(src_copy);
382 return -ENOMEM;
385 read_extent_buffer(eb, src_copy, src_ptr, item_size);
387 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
388 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
389 item_size);
390 ret = memcmp(dst_copy, src_copy, item_size);
392 kfree(dst_copy);
393 kfree(src_copy);
395 * they have the same contents, just return, this saves
396 * us from cowing blocks in the destination tree and doing
397 * extra writes that may not have been done by a previous
398 * sync
400 if (ret == 0) {
401 btrfs_release_path(path);
402 return 0;
406 * We need to load the old nbytes into the inode so when we
407 * replay the extents we've logged we get the right nbytes.
409 if (inode_item) {
410 struct btrfs_inode_item *item;
411 u64 nbytes;
412 u32 mode;
414 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
415 struct btrfs_inode_item);
416 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
417 item = btrfs_item_ptr(eb, slot,
418 struct btrfs_inode_item);
419 btrfs_set_inode_nbytes(eb, item, nbytes);
422 * If this is a directory we need to reset the i_size to
423 * 0 so that we can set it up properly when replaying
424 * the rest of the items in this log.
426 mode = btrfs_inode_mode(eb, item);
427 if (S_ISDIR(mode))
428 btrfs_set_inode_size(eb, item, 0);
430 } else if (inode_item) {
431 struct btrfs_inode_item *item;
432 u32 mode;
435 * New inode, set nbytes to 0 so that the nbytes comes out
436 * properly when we replay the extents.
438 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
439 btrfs_set_inode_nbytes(eb, item, 0);
442 * If this is a directory we need to reset the i_size to 0 so
443 * that we can set it up properly when replaying the rest of
444 * the items in this log.
446 mode = btrfs_inode_mode(eb, item);
447 if (S_ISDIR(mode))
448 btrfs_set_inode_size(eb, item, 0);
450 insert:
451 btrfs_release_path(path);
452 /* try to insert the key into the destination tree */
453 path->skip_release_on_error = 1;
454 ret = btrfs_insert_empty_item(trans, root, path,
455 key, item_size);
456 path->skip_release_on_error = 0;
458 /* make sure any existing item is the correct size */
459 if (ret == -EEXIST || ret == -EOVERFLOW) {
460 u32 found_size;
461 found_size = btrfs_item_size_nr(path->nodes[0],
462 path->slots[0]);
463 if (found_size > item_size)
464 btrfs_truncate_item(root, path, item_size, 1);
465 else if (found_size < item_size)
466 btrfs_extend_item(root, path,
467 item_size - found_size);
468 } else if (ret) {
469 return ret;
471 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
472 path->slots[0]);
474 /* don't overwrite an existing inode if the generation number
475 * was logged as zero. This is done when the tree logging code
476 * is just logging an inode to make sure it exists after recovery.
478 * Also, don't overwrite i_size on directories during replay.
479 * log replay inserts and removes directory items based on the
480 * state of the tree found in the subvolume, and i_size is modified
481 * as it goes
483 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
484 struct btrfs_inode_item *src_item;
485 struct btrfs_inode_item *dst_item;
487 src_item = (struct btrfs_inode_item *)src_ptr;
488 dst_item = (struct btrfs_inode_item *)dst_ptr;
490 if (btrfs_inode_generation(eb, src_item) == 0) {
491 struct extent_buffer *dst_eb = path->nodes[0];
492 const u64 ino_size = btrfs_inode_size(eb, src_item);
495 * For regular files an ino_size == 0 is used only when
496 * logging that an inode exists, as part of a directory
497 * fsync, and the inode wasn't fsynced before. In this
498 * case don't set the size of the inode in the fs/subvol
499 * tree, otherwise we would be throwing valid data away.
501 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
502 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
503 ino_size != 0) {
504 struct btrfs_map_token token;
506 btrfs_init_map_token(&token);
507 btrfs_set_token_inode_size(dst_eb, dst_item,
508 ino_size, &token);
510 goto no_copy;
513 if (overwrite_root &&
514 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
515 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
516 save_old_i_size = 1;
517 saved_i_size = btrfs_inode_size(path->nodes[0],
518 dst_item);
522 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
523 src_ptr, item_size);
525 if (save_old_i_size) {
526 struct btrfs_inode_item *dst_item;
527 dst_item = (struct btrfs_inode_item *)dst_ptr;
528 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
531 /* make sure the generation is filled in */
532 if (key->type == BTRFS_INODE_ITEM_KEY) {
533 struct btrfs_inode_item *dst_item;
534 dst_item = (struct btrfs_inode_item *)dst_ptr;
535 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
536 btrfs_set_inode_generation(path->nodes[0], dst_item,
537 trans->transid);
540 no_copy:
541 btrfs_mark_buffer_dirty(path->nodes[0]);
542 btrfs_release_path(path);
543 return 0;
547 * simple helper to read an inode off the disk from a given root
548 * This can only be called for subvolume roots and not for the log
550 static noinline struct inode *read_one_inode(struct btrfs_root *root,
551 u64 objectid)
553 struct btrfs_key key;
554 struct inode *inode;
556 key.objectid = objectid;
557 key.type = BTRFS_INODE_ITEM_KEY;
558 key.offset = 0;
559 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
560 if (IS_ERR(inode)) {
561 inode = NULL;
562 } else if (is_bad_inode(inode)) {
563 iput(inode);
564 inode = NULL;
566 return inode;
569 /* replays a single extent in 'eb' at 'slot' with 'key' into the
570 * subvolume 'root'. path is released on entry and should be released
571 * on exit.
573 * extents in the log tree have not been allocated out of the extent
574 * tree yet. So, this completes the allocation, taking a reference
575 * as required if the extent already exists or creating a new extent
576 * if it isn't in the extent allocation tree yet.
578 * The extent is inserted into the file, dropping any existing extents
579 * from the file that overlap the new one.
581 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
582 struct btrfs_root *root,
583 struct btrfs_path *path,
584 struct extent_buffer *eb, int slot,
585 struct btrfs_key *key)
587 int found_type;
588 u64 extent_end;
589 u64 start = key->offset;
590 u64 nbytes = 0;
591 struct btrfs_file_extent_item *item;
592 struct inode *inode = NULL;
593 unsigned long size;
594 int ret = 0;
596 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
597 found_type = btrfs_file_extent_type(eb, item);
599 if (found_type == BTRFS_FILE_EXTENT_REG ||
600 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
601 nbytes = btrfs_file_extent_num_bytes(eb, item);
602 extent_end = start + nbytes;
605 * We don't add to the inode's nbytes if we are prealloc or a
606 * hole.
608 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
609 nbytes = 0;
610 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
611 size = btrfs_file_extent_inline_len(eb, slot, item);
612 nbytes = btrfs_file_extent_ram_bytes(eb, item);
613 extent_end = ALIGN(start + size, root->sectorsize);
614 } else {
615 ret = 0;
616 goto out;
619 inode = read_one_inode(root, key->objectid);
620 if (!inode) {
621 ret = -EIO;
622 goto out;
626 * first check to see if we already have this extent in the
627 * file. This must be done before btrfs_drop_extents is run
628 * so we don't try to drop this extent.
630 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
631 start, 0);
633 if (ret == 0 &&
634 (found_type == BTRFS_FILE_EXTENT_REG ||
635 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
636 struct btrfs_file_extent_item cmp1;
637 struct btrfs_file_extent_item cmp2;
638 struct btrfs_file_extent_item *existing;
639 struct extent_buffer *leaf;
641 leaf = path->nodes[0];
642 existing = btrfs_item_ptr(leaf, path->slots[0],
643 struct btrfs_file_extent_item);
645 read_extent_buffer(eb, &cmp1, (unsigned long)item,
646 sizeof(cmp1));
647 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
648 sizeof(cmp2));
651 * we already have a pointer to this exact extent,
652 * we don't have to do anything
654 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
655 btrfs_release_path(path);
656 goto out;
659 btrfs_release_path(path);
661 /* drop any overlapping extents */
662 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
663 if (ret)
664 goto out;
666 if (found_type == BTRFS_FILE_EXTENT_REG ||
667 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
668 u64 offset;
669 unsigned long dest_offset;
670 struct btrfs_key ins;
672 ret = btrfs_insert_empty_item(trans, root, path, key,
673 sizeof(*item));
674 if (ret)
675 goto out;
676 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
677 path->slots[0]);
678 copy_extent_buffer(path->nodes[0], eb, dest_offset,
679 (unsigned long)item, sizeof(*item));
681 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
682 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
683 ins.type = BTRFS_EXTENT_ITEM_KEY;
684 offset = key->offset - btrfs_file_extent_offset(eb, item);
687 * Manually record the dirty extent: here we did a shallow copy of
688 * the file extent item and skipped the normal backref update,
689 * modifying the extent tree all by ourselves.
690 * So we need to manually record the dirty extent for qgroup,
691 * as the owner of the file extent changed from the log tree
692 * (doesn't affect qgroup) to the fs/file tree (affects qgroup)
694 ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
695 btrfs_file_extent_disk_bytenr(eb, item),
696 btrfs_file_extent_disk_num_bytes(eb, item),
697 GFP_NOFS);
698 if (ret < 0)
699 goto out;
701 if (ins.objectid > 0) {
702 u64 csum_start;
703 u64 csum_end;
704 LIST_HEAD(ordered_sums);
706 * is this extent already allocated in the extent
707 * allocation tree? If so, just add a reference
709 ret = btrfs_lookup_data_extent(root, ins.objectid,
710 ins.offset);
711 if (ret == 0) {
712 ret = btrfs_inc_extent_ref(trans, root,
713 ins.objectid, ins.offset,
714 0, root->root_key.objectid,
715 key->objectid, offset);
716 if (ret)
717 goto out;
718 } else {
720 * insert the extent pointer in the extent
721 * allocation tree
723 ret = btrfs_alloc_logged_file_extent(trans,
724 root, root->root_key.objectid,
725 key->objectid, offset, &ins);
726 if (ret)
727 goto out;
729 btrfs_release_path(path);
731 if (btrfs_file_extent_compression(eb, item)) {
732 csum_start = ins.objectid;
733 csum_end = csum_start + ins.offset;
734 } else {
735 csum_start = ins.objectid +
736 btrfs_file_extent_offset(eb, item);
737 csum_end = csum_start +
738 btrfs_file_extent_num_bytes(eb, item);
741 ret = btrfs_lookup_csums_range(root->log_root,
742 csum_start, csum_end - 1,
743 &ordered_sums, 0);
744 if (ret)
745 goto out;
747 * Now delete all existing csums in the csum root that
748 * cover our range. We do this because we can have an
749 * extent that is completely referenced by one file
750 * extent item and partially referenced by another
751 * file extent item (like after using the clone or
752 * extent_same ioctls). In this case if we end up doing
753 * the replay of the one that partially references the
754 * extent first, and we do not do the csum deletion
755 * below, we can get 2 csum items in the csum tree that
756 * overlap each other. For example, imagine our log has
757 * the two following file extent items:
759 * key (257 EXTENT_DATA 409600)
760 * extent data disk byte 12845056 nr 102400
761 * extent data offset 20480 nr 20480 ram 102400
763 * key (257 EXTENT_DATA 819200)
764 * extent data disk byte 12845056 nr 102400
765 * extent data offset 0 nr 102400 ram 102400
767 * Where the second one fully references the 100K extent
768 * that starts at disk byte 12845056, and the log tree
769 * has a single csum item that covers the entire range
770 * of the extent:
772 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
774 * After the first file extent item is replayed, the
775 * csum tree gets the following csum item:
777 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
779 * Which covers the 20K sub-range starting at offset 20K
780 * of our extent. Now when we replay the second file
781 * extent item, if we do not delete existing csum items
782 * that cover any of its blocks, we end up getting two
783 * csum items in our csum tree that overlap each other:
785 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
786 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
788 * Which is a problem, because after this anyone trying
789 * to look up the checksum of any block of our
790 * extent starting at an offset of 40K or higher, will
791 * end up looking at the second csum item only, which
792 * does not contain the checksum for any block starting
793 * at offset 40K or higher of our extent.
795 while (!list_empty(&ordered_sums)) {
796 struct btrfs_ordered_sum *sums;
797 sums = list_entry(ordered_sums.next,
798 struct btrfs_ordered_sum,
799 list);
800 if (!ret)
801 ret = btrfs_del_csums(trans,
802 root->fs_info->csum_root,
803 sums->bytenr,
804 sums->len);
805 if (!ret)
806 ret = btrfs_csum_file_blocks(trans,
807 root->fs_info->csum_root,
808 sums);
809 list_del(&sums->list);
810 kfree(sums);
812 if (ret)
813 goto out;
814 } else {
815 btrfs_release_path(path);
817 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
818 /* inline extents are easy, we just overwrite them */
819 ret = overwrite_item(trans, root, path, eb, slot, key);
820 if (ret)
821 goto out;
824 inode_add_bytes(inode, nbytes);
825 ret = btrfs_update_inode(trans, root, inode);
826 out:
827 if (inode)
828 iput(inode);
829 return ret;
833 * when cleaning up conflicts between the directory names in the
834 * subvolume, directory names in the log and directory names in the
835 * inode back references, we may have to unlink inodes from directories.
837 * This is a helper function to do the unlink of a specific directory
838 * item
840 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
841 struct btrfs_root *root,
842 struct btrfs_path *path,
843 struct inode *dir,
844 struct btrfs_dir_item *di)
846 struct inode *inode;
847 char *name;
848 int name_len;
849 struct extent_buffer *leaf;
850 struct btrfs_key location;
851 int ret;
853 leaf = path->nodes[0];
855 btrfs_dir_item_key_to_cpu(leaf, di, &location);
856 name_len = btrfs_dir_name_len(leaf, di);
857 name = kmalloc(name_len, GFP_NOFS);
858 if (!name)
859 return -ENOMEM;
861 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
862 btrfs_release_path(path);
864 inode = read_one_inode(root, location.objectid);
865 if (!inode) {
866 ret = -EIO;
867 goto out;
870 ret = link_to_fixup_dir(trans, root, path, location.objectid);
871 if (ret)
872 goto out;
874 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
875 if (ret)
876 goto out;
877 else
878 ret = btrfs_run_delayed_items(trans, root);
879 out:
880 kfree(name);
881 iput(inode);
882 return ret;
886 * helper function to see if a given name and sequence number found
887 * in an inode back reference are already in a directory and correctly
888 * point to this inode
890 static noinline int inode_in_dir(struct btrfs_root *root,
891 struct btrfs_path *path,
892 u64 dirid, u64 objectid, u64 index,
893 const char *name, int name_len)
895 struct btrfs_dir_item *di;
896 struct btrfs_key location;
897 int match = 0;
899 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
900 index, name, name_len, 0);
901 if (di && !IS_ERR(di)) {
902 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
903 if (location.objectid != objectid)
904 goto out;
905 } else
906 goto out;
907 btrfs_release_path(path);
909 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
910 if (di && !IS_ERR(di)) {
911 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
912 if (location.objectid != objectid)
913 goto out;
914 } else
915 goto out;
916 match = 1;
917 out:
918 btrfs_release_path(path);
919 return match;
923 * helper function to check a log tree for a named back reference in
924 * an inode. This is used to decide if a back reference that is
925 * found in the subvolume conflicts with what we find in the log.
927 * inode backreferences may have multiple refs in a single item,
928 * during replay we process one reference at a time, and we don't
929 * want to delete valid links to a file from the subvolume if that
930 * link is also in the log.
932 static noinline int backref_in_log(struct btrfs_root *log,
933 struct btrfs_key *key,
934 u64 ref_objectid,
935 const char *name, int namelen)
937 struct btrfs_path *path;
938 struct btrfs_inode_ref *ref;
939 unsigned long ptr;
940 unsigned long ptr_end;
941 unsigned long name_ptr;
942 int found_name_len;
943 int item_size;
944 int ret;
945 int match = 0;
947 path = btrfs_alloc_path();
948 if (!path)
949 return -ENOMEM;
951 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
952 if (ret != 0)
953 goto out;
955 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
957 if (key->type == BTRFS_INODE_EXTREF_KEY) {
958 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
959 name, namelen, NULL))
960 match = 1;
962 goto out;
965 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
966 ptr_end = ptr + item_size;
967 while (ptr < ptr_end) {
968 ref = (struct btrfs_inode_ref *)ptr;
969 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
970 if (found_name_len == namelen) {
971 name_ptr = (unsigned long)(ref + 1);
972 ret = memcmp_extent_buffer(path->nodes[0], name,
973 name_ptr, namelen);
974 if (ret == 0) {
975 match = 1;
976 goto out;
979 ptr = (unsigned long)(ref + 1) + found_name_len;
981 out:
982 btrfs_free_path(path);
983 return match;
986 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
987 struct btrfs_root *root,
988 struct btrfs_path *path,
989 struct btrfs_root *log_root,
990 struct inode *dir, struct inode *inode,
991 struct extent_buffer *eb,
992 u64 inode_objectid, u64 parent_objectid,
993 u64 ref_index, char *name, int namelen,
994 int *search_done)
996 int ret;
997 char *victim_name;
998 int victim_name_len;
999 struct extent_buffer *leaf;
1000 struct btrfs_dir_item *di;
1001 struct btrfs_key search_key;
1002 struct btrfs_inode_extref *extref;
1004 again:
1005 /* Search old style refs */
1006 search_key.objectid = inode_objectid;
1007 search_key.type = BTRFS_INODE_REF_KEY;
1008 search_key.offset = parent_objectid;
1009 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1010 if (ret == 0) {
1011 struct btrfs_inode_ref *victim_ref;
1012 unsigned long ptr;
1013 unsigned long ptr_end;
1015 leaf = path->nodes[0];
1017 /* are we trying to overwrite a back ref for the root directory
1018 * if so, just jump out, we're done
1020 if (search_key.objectid == search_key.offset)
1021 return 1;
1023 /* check all the names in this back reference to see
1024 * if they are in the log. if so, we allow them to stay
1025 * otherwise they must be unlinked as a conflict
1027 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1028 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1029 while (ptr < ptr_end) {
1030 victim_ref = (struct btrfs_inode_ref *)ptr;
1031 victim_name_len = btrfs_inode_ref_name_len(leaf,
1032 victim_ref);
1033 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1034 if (!victim_name)
1035 return -ENOMEM;
1037 read_extent_buffer(leaf, victim_name,
1038 (unsigned long)(victim_ref + 1),
1039 victim_name_len);
1041 if (!backref_in_log(log_root, &search_key,
1042 parent_objectid,
1043 victim_name,
1044 victim_name_len)) {
1045 inc_nlink(inode);
1046 btrfs_release_path(path);
1048 ret = btrfs_unlink_inode(trans, root, dir,
1049 inode, victim_name,
1050 victim_name_len);
1051 kfree(victim_name);
1052 if (ret)
1053 return ret;
1054 ret = btrfs_run_delayed_items(trans, root);
1055 if (ret)
1056 return ret;
1057 *search_done = 1;
1058 goto again;
1060 kfree(victim_name);
1062 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1066 * NOTE: we have already searched the root tree and checked the
1067 * corresponding ref, so it does not need to be checked again.
1069 *search_done = 1;
1071 btrfs_release_path(path);
1073 /* Same search but for extended refs */
1074 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1075 inode_objectid, parent_objectid, 0, 0);
1077 if (!IS_ERR_OR_NULL(extref)) {
1078 u32 item_size;
1079 u32 cur_offset = 0;
1080 unsigned long base;
1081 struct inode *victim_parent;
1083 leaf = path->nodes[0];
1085 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1086 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1088 while (cur_offset < item_size) {
1089 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1091 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1093 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1094 goto next;
1096 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1097 if (!victim_name)
1098 return -ENOMEM;
1099 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1100 victim_name_len);
1102 search_key.objectid = inode_objectid;
1103 search_key.type = BTRFS_INODE_EXTREF_KEY;
1104 search_key.offset = btrfs_extref_hash(parent_objectid,
1105 victim_name,
1106 victim_name_len);
1107 ret = 0;
1108 if (!backref_in_log(log_root, &search_key,
1109 parent_objectid, victim_name,
1110 victim_name_len)) {
1111 ret = -ENOENT;
1112 victim_parent = read_one_inode(root,
1113 parent_objectid);
1114 if (victim_parent) {
1115 inc_nlink(inode);
1116 btrfs_release_path(path);
1118 ret = btrfs_unlink_inode(trans, root,
1119 victim_parent,
1120 inode,
1121 victim_name,
1122 victim_name_len);
1123 if (!ret)
1124 ret = btrfs_run_delayed_items(
1125 trans, root);
1127 iput(victim_parent);
1128 kfree(victim_name);
1129 if (ret)
1130 return ret;
1131 *search_done = 1;
1132 goto again;
1134 kfree(victim_name);
1135 if (ret)
1136 return ret;
1137 next:
1138 cur_offset += victim_name_len + sizeof(*extref);
1140 *search_done = 1;
1142 btrfs_release_path(path);
1144 /* look for a conflicting sequence number */
1145 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1146 ref_index, name, namelen, 0);
1147 if (di && !IS_ERR(di)) {
1148 ret = drop_one_dir_item(trans, root, path, dir, di);
1149 if (ret)
1150 return ret;
1152 btrfs_release_path(path);
1154 /* look for a conflicting name */
1155 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1156 name, namelen, 0);
1157 if (di && !IS_ERR(di)) {
1158 ret = drop_one_dir_item(trans, root, path, dir, di);
1159 if (ret)
1160 return ret;
1162 btrfs_release_path(path);
1164 return 0;
1167 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1168 u32 *namelen, char **name, u64 *index,
1169 u64 *parent_objectid)
1171 struct btrfs_inode_extref *extref;
1173 extref = (struct btrfs_inode_extref *)ref_ptr;
1175 *namelen = btrfs_inode_extref_name_len(eb, extref);
1176 *name = kmalloc(*namelen, GFP_NOFS);
1177 if (*name == NULL)
1178 return -ENOMEM;
1180 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1181 *namelen);
1183 *index = btrfs_inode_extref_index(eb, extref);
1184 if (parent_objectid)
1185 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1187 return 0;
1190 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1191 u32 *namelen, char **name, u64 *index)
1193 struct btrfs_inode_ref *ref;
1195 ref = (struct btrfs_inode_ref *)ref_ptr;
1197 *namelen = btrfs_inode_ref_name_len(eb, ref);
1198 *name = kmalloc(*namelen, GFP_NOFS);
1199 if (*name == NULL)
1200 return -ENOMEM;
1202 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1204 *index = btrfs_inode_ref_index(eb, ref);
1206 return 0;
1210 * replay one inode back reference item found in the log tree.
1211 * eb, slot and key refer to the buffer and key found in the log tree.
1212 * root is the destination we are replaying into, and path is for temp
1213 * use by this function. (it should be released on return).
1215 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1216 struct btrfs_root *root,
1217 struct btrfs_root *log,
1218 struct btrfs_path *path,
1219 struct extent_buffer *eb, int slot,
1220 struct btrfs_key *key)
1222 struct inode *dir = NULL;
1223 struct inode *inode = NULL;
1224 unsigned long ref_ptr;
1225 unsigned long ref_end;
1226 char *name = NULL;
1227 int namelen;
1228 int ret;
1229 int search_done = 0;
1230 int log_ref_ver = 0;
1231 u64 parent_objectid;
1232 u64 inode_objectid;
1233 u64 ref_index = 0;
1234 int ref_struct_size;
1236 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1237 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1239 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1240 struct btrfs_inode_extref *r;
1242 ref_struct_size = sizeof(struct btrfs_inode_extref);
1243 log_ref_ver = 1;
1244 r = (struct btrfs_inode_extref *)ref_ptr;
1245 parent_objectid = btrfs_inode_extref_parent(eb, r);
1246 } else {
1247 ref_struct_size = sizeof(struct btrfs_inode_ref);
1248 parent_objectid = key->offset;
1250 inode_objectid = key->objectid;
1253 * it is possible that we didn't log all the parent directories
1254 * for a given inode. If we don't find the dir, just don't
1255 * copy the back ref in. The link count fixup code will take
1256 * care of the rest
1258 dir = read_one_inode(root, parent_objectid);
1259 if (!dir) {
1260 ret = -ENOENT;
1261 goto out;
1264 inode = read_one_inode(root, inode_objectid);
1265 if (!inode) {
1266 ret = -EIO;
1267 goto out;
1270 while (ref_ptr < ref_end) {
1271 if (log_ref_ver) {
1272 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1273 &ref_index, &parent_objectid);
1275 * parent object can change from one array
1276 * item to another.
1278 if (!dir)
1279 dir = read_one_inode(root, parent_objectid);
1280 if (!dir) {
1281 ret = -ENOENT;
1282 goto out;
1284 } else {
1285 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1286 &ref_index);
1288 if (ret)
1289 goto out;
1291 /* if we already have a perfect match, we're done */
1292 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1293 ref_index, name, namelen)) {
1295 * look for a conflicting back reference in the
1296 * metadata. if we find one we have to unlink that name
1297 * of the file before we add our new link. Later on, we
1298 * overwrite any existing back reference, and we don't
1299 * want to create dangling pointers in the directory.
1302 if (!search_done) {
1303 ret = __add_inode_ref(trans, root, path, log,
1304 dir, inode, eb,
1305 inode_objectid,
1306 parent_objectid,
1307 ref_index, name, namelen,
1308 &search_done);
1309 if (ret) {
1310 if (ret == 1)
1311 ret = 0;
1312 goto out;
1316 /* insert our name */
1317 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1318 0, ref_index);
1319 if (ret)
1320 goto out;
1322 btrfs_update_inode(trans, root, inode);
1325 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1326 kfree(name);
1327 name = NULL;
1328 if (log_ref_ver) {
1329 iput(dir);
1330 dir = NULL;
1334 /* finally write the back reference in the inode */
1335 ret = overwrite_item(trans, root, path, eb, slot, key);
1336 out:
1337 btrfs_release_path(path);
1338 kfree(name);
1339 iput(dir);
1340 iput(inode);
1341 return ret;
1344 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1345 struct btrfs_root *root, u64 ino)
1347 int ret;
1349 ret = btrfs_insert_orphan_item(trans, root, ino);
1350 if (ret == -EEXIST)
1351 ret = 0;
1353 return ret;
1356 static int count_inode_extrefs(struct btrfs_root *root,
1357 struct inode *inode, struct btrfs_path *path)
1359 int ret = 0;
1360 int name_len;
1361 unsigned int nlink = 0;
1362 u32 item_size;
1363 u32 cur_offset = 0;
1364 u64 inode_objectid = btrfs_ino(inode);
1365 u64 offset = 0;
1366 unsigned long ptr;
1367 struct btrfs_inode_extref *extref;
1368 struct extent_buffer *leaf;
1370 while (1) {
1371 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1372 &extref, &offset);
1373 if (ret)
1374 break;
1376 leaf = path->nodes[0];
1377 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1378 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1379 cur_offset = 0;
1381 while (cur_offset < item_size) {
1382 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1383 name_len = btrfs_inode_extref_name_len(leaf, extref);
1385 nlink++;
1387 cur_offset += name_len + sizeof(*extref);
1390 offset++;
1391 btrfs_release_path(path);
1393 btrfs_release_path(path);
1395 if (ret < 0 && ret != -ENOENT)
1396 return ret;
1397 return nlink;
1400 static int count_inode_refs(struct btrfs_root *root,
1401 struct inode *inode, struct btrfs_path *path)
1403 int ret;
1404 struct btrfs_key key;
1405 unsigned int nlink = 0;
1406 unsigned long ptr;
1407 unsigned long ptr_end;
1408 int name_len;
1409 u64 ino = btrfs_ino(inode);
1411 key.objectid = ino;
1412 key.type = BTRFS_INODE_REF_KEY;
1413 key.offset = (u64)-1;
1415 while (1) {
1416 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1417 if (ret < 0)
1418 break;
1419 if (ret > 0) {
1420 if (path->slots[0] == 0)
1421 break;
1422 path->slots[0]--;
1424 process_slot:
1425 btrfs_item_key_to_cpu(path->nodes[0], &key,
1426 path->slots[0]);
1427 if (key.objectid != ino ||
1428 key.type != BTRFS_INODE_REF_KEY)
1429 break;
1430 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1431 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1432 path->slots[0]);
1433 while (ptr < ptr_end) {
1434 struct btrfs_inode_ref *ref;
1436 ref = (struct btrfs_inode_ref *)ptr;
1437 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1438 ref);
1439 ptr = (unsigned long)(ref + 1) + name_len;
1440 nlink++;
1443 if (key.offset == 0)
1444 break;
1445 if (path->slots[0] > 0) {
1446 path->slots[0]--;
1447 goto process_slot;
1449 key.offset--;
1450 btrfs_release_path(path);
1452 btrfs_release_path(path);
1454 return nlink;
1458 * There are a few corners where the link count of the file can't
1459 * be properly maintained during replay. So, instead of adding
1460 * lots of complexity to the log code, we just scan the backrefs
1461 * for any file that has been through replay.
1463 * The scan will update the link count on the inode to reflect the
1464 * number of back refs found. If it goes down to zero, the iput
1465 * will free the inode.
1467 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1468 struct btrfs_root *root,
1469 struct inode *inode)
1471 struct btrfs_path *path;
1472 int ret;
1473 u64 nlink = 0;
1474 u64 ino = btrfs_ino(inode);
1476 path = btrfs_alloc_path();
1477 if (!path)
1478 return -ENOMEM;
1480 ret = count_inode_refs(root, inode, path);
1481 if (ret < 0)
1482 goto out;
1484 nlink = ret;
1486 ret = count_inode_extrefs(root, inode, path);
1487 if (ret < 0)
1488 goto out;
1490 nlink += ret;
1492 ret = 0;
1494 if (nlink != inode->i_nlink) {
1495 set_nlink(inode, nlink);
1496 btrfs_update_inode(trans, root, inode);
1498 BTRFS_I(inode)->index_cnt = (u64)-1;
1500 if (inode->i_nlink == 0) {
1501 if (S_ISDIR(inode->i_mode)) {
1502 ret = replay_dir_deletes(trans, root, NULL, path,
1503 ino, 1);
1504 if (ret)
1505 goto out;
1507 ret = insert_orphan_item(trans, root, ino);
1510 out:
1511 btrfs_free_path(path);
1512 return ret;
1515 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1516 struct btrfs_root *root,
1517 struct btrfs_path *path)
1519 int ret;
1520 struct btrfs_key key;
1521 struct inode *inode;
1523 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1524 key.type = BTRFS_ORPHAN_ITEM_KEY;
1525 key.offset = (u64)-1;
1526 while (1) {
1527 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1528 if (ret < 0)
1529 break;
1531 if (ret == 1) {
1532 if (path->slots[0] == 0)
1533 break;
1534 path->slots[0]--;
1537 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1538 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1539 key.type != BTRFS_ORPHAN_ITEM_KEY)
1540 break;
1542 ret = btrfs_del_item(trans, root, path);
1543 if (ret)
1544 goto out;
1546 btrfs_release_path(path);
1547 inode = read_one_inode(root, key.offset);
1548 if (!inode)
1549 return -EIO;
1551 ret = fixup_inode_link_count(trans, root, inode);
1552 iput(inode);
1553 if (ret)
1554 goto out;
1557 * fixup on a directory may create new entries,
1558 * make sure we always look for the highest possible
1559 * offset
1561 key.offset = (u64)-1;
1563 ret = 0;
1564 out:
1565 btrfs_release_path(path);
1566 return ret;
1571 * record a given inode in the fixup dir so we can check its link
1572 * count when replay is done. The link count is incremented here
1573 * so the inode won't go away until we check it
1575 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1576 struct btrfs_root *root,
1577 struct btrfs_path *path,
1578 u64 objectid)
1580 struct btrfs_key key;
1581 int ret = 0;
1582 struct inode *inode;
1584 inode = read_one_inode(root, objectid);
1585 if (!inode)
1586 return -EIO;
1588 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1589 key.type = BTRFS_ORPHAN_ITEM_KEY;
1590 key.offset = objectid;
1592 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1594 btrfs_release_path(path);
1595 if (ret == 0) {
1596 if (!inode->i_nlink)
1597 set_nlink(inode, 1);
1598 else
1599 inc_nlink(inode);
1600 ret = btrfs_update_inode(trans, root, inode);
1601 } else if (ret == -EEXIST) {
1602 ret = 0;
1603 } else {
1604 BUG(); /* Logic Error */
1606 iput(inode);
1608 return ret;
1612 * when replaying the log for a directory, we only insert names
1613 * for inodes that actually exist. This means an fsync on a directory
1614 * does not implicitly fsync all the new files in it
1616 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1617 struct btrfs_root *root,
1618 u64 dirid, u64 index,
1619 char *name, int name_len,
1620 struct btrfs_key *location)
1622 struct inode *inode;
1623 struct inode *dir;
1624 int ret;
1626 inode = read_one_inode(root, location->objectid);
1627 if (!inode)
1628 return -ENOENT;
1630 dir = read_one_inode(root, dirid);
1631 if (!dir) {
1632 iput(inode);
1633 return -EIO;
1636 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1638 /* FIXME, put inode into FIXUP list */
1640 iput(inode);
1641 iput(dir);
1642 return ret;
1646 * Return true if an inode reference exists in the log for the given name,
1647 * inode and parent inode.
1649 static bool name_in_log_ref(struct btrfs_root *log_root,
1650 const char *name, const int name_len,
1651 const u64 dirid, const u64 ino)
1653 struct btrfs_key search_key;
1655 search_key.objectid = ino;
1656 search_key.type = BTRFS_INODE_REF_KEY;
1657 search_key.offset = dirid;
1658 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1659 return true;
1661 search_key.type = BTRFS_INODE_EXTREF_KEY;
1662 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1663 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1664 return true;
1666 return false;
1670 * take a single entry in a log directory item and replay it into
1671 * the subvolume.
1673 * if a conflicting item exists in the subdirectory already,
1674 * the inode it points to is unlinked and put into the link count
1675 * fix up tree.
1677 * If a name from the log points to a file or directory that does
1678 * not exist in the FS, it is skipped. fsyncs on directories
1679 * do not force down inodes inside that directory, just changes to the
1680 * names or unlinks in a directory.
1682 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1683 * non-existing inode) and 1 if the name was replayed.
1685 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1686 struct btrfs_root *root,
1687 struct btrfs_path *path,
1688 struct extent_buffer *eb,
1689 struct btrfs_dir_item *di,
1690 struct btrfs_key *key)
1692 char *name;
1693 int name_len;
1694 struct btrfs_dir_item *dst_di;
1695 struct btrfs_key found_key;
1696 struct btrfs_key log_key;
1697 struct inode *dir;
1698 u8 log_type;
1699 int exists;
1700 int ret = 0;
1701 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1702 bool name_added = false;
1704 dir = read_one_inode(root, key->objectid);
1705 if (!dir)
1706 return -EIO;
1708 name_len = btrfs_dir_name_len(eb, di);
1709 name = kmalloc(name_len, GFP_NOFS);
1710 if (!name) {
1711 ret = -ENOMEM;
1712 goto out;
1715 log_type = btrfs_dir_type(eb, di);
1716 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1717 name_len);
1719 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1720 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1721 if (exists == 0)
1722 exists = 1;
1723 else
1724 exists = 0;
1725 btrfs_release_path(path);
1727 if (key->type == BTRFS_DIR_ITEM_KEY) {
1728 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1729 name, name_len, 1);
1730 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1731 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1732 key->objectid,
1733 key->offset, name,
1734 name_len, 1);
1735 } else {
1736 /* Corruption */
1737 ret = -EINVAL;
1738 goto out;
1740 if (IS_ERR_OR_NULL(dst_di)) {
1741 /* we need a sequence number to insert, so we only
1742 * do inserts for the BTRFS_DIR_INDEX_KEY types
1744 if (key->type != BTRFS_DIR_INDEX_KEY)
1745 goto out;
1746 goto insert;
1749 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1750 /* the existing item matches the logged item */
1751 if (found_key.objectid == log_key.objectid &&
1752 found_key.type == log_key.type &&
1753 found_key.offset == log_key.offset &&
1754 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1755 update_size = false;
1756 goto out;
1760 * don't drop the conflicting directory entry if the inode
1761 * for the new entry doesn't exist
1763 if (!exists)
1764 goto out;
1766 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1767 if (ret)
1768 goto out;
1770 if (key->type == BTRFS_DIR_INDEX_KEY)
1771 goto insert;
1772 out:
1773 btrfs_release_path(path);
1774 if (!ret && update_size) {
1775 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1776 ret = btrfs_update_inode(trans, root, dir);
1778 kfree(name);
1779 iput(dir);
1780 if (!ret && name_added)
1781 ret = 1;
1782 return ret;
1784 insert:
1785 if (name_in_log_ref(root->log_root, name, name_len,
1786 key->objectid, log_key.objectid)) {
1787 /* The dentry will be added later. */
1788 ret = 0;
1789 update_size = false;
1790 goto out;
1792 btrfs_release_path(path);
1793 ret = insert_one_name(trans, root, key->objectid, key->offset,
1794 name, name_len, &log_key);
1795 if (ret && ret != -ENOENT && ret != -EEXIST)
1796 goto out;
1797 if (!ret)
1798 name_added = true;
1799 update_size = false;
1800 ret = 0;
1801 goto out;
1805 * find all the names in a directory item and reconcile them into
1806 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1807 * one name in a directory item, but the same code gets used for
1808 * both directory index types
1810 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1811 struct btrfs_root *root,
1812 struct btrfs_path *path,
1813 struct extent_buffer *eb, int slot,
1814 struct btrfs_key *key)
1816 int ret = 0;
1817 u32 item_size = btrfs_item_size_nr(eb, slot);
1818 struct btrfs_dir_item *di;
1819 int name_len;
1820 unsigned long ptr;
1821 unsigned long ptr_end;
1822 struct btrfs_path *fixup_path = NULL;
1824 ptr = btrfs_item_ptr_offset(eb, slot);
1825 ptr_end = ptr + item_size;
1826 while (ptr < ptr_end) {
1827 di = (struct btrfs_dir_item *)ptr;
1828 if (verify_dir_item(root, eb, di))
1829 return -EIO;
1830 name_len = btrfs_dir_name_len(eb, di);
1831 ret = replay_one_name(trans, root, path, eb, di, key);
1832 if (ret < 0)
1833 break;
1834 ptr = (unsigned long)(di + 1);
1835 ptr += name_len;
1838 * If this entry refers to a non-directory (directories can not
1839 * have a link count > 1) and it was added in the transaction
1840 * that was not committed, make sure we fix up the link count of
1841 * the inode the entry points to. Otherwise something like
1842 * the following would result in a directory pointing to an
1843 * inode with a wrong link count that does not account for this dir
1844 * entry:
1846 * mkdir testdir
1847 * touch testdir/foo
1848 * touch testdir/bar
1849 * sync
1851 * ln testdir/bar testdir/bar_link
1852 * ln testdir/foo testdir/foo_link
1853 * xfs_io -c "fsync" testdir/bar
1855 * <power failure>
1857 * mount fs, log replay happens
1859 * File foo would remain with a link count of 1 when it has two
1860 * entries pointing to it in the directory testdir. This would
1861 * make it impossible to ever delete the parent directory, as
1862 * it would result in stale dentries that can never be deleted.
1864 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1865 struct btrfs_key di_key;
1867 if (!fixup_path) {
1868 fixup_path = btrfs_alloc_path();
1869 if (!fixup_path) {
1870 ret = -ENOMEM;
1871 break;
1875 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1876 ret = link_to_fixup_dir(trans, root, fixup_path,
1877 di_key.objectid);
1878 if (ret)
1879 break;
1881 ret = 0;
1883 btrfs_free_path(fixup_path);
1884 return ret;
1888 * directory replay has two parts. There are the standard directory
1889 * items in the log copied from the subvolume, and range items
1890 * created in the log while the subvolume was logged.
1892 * The range items tell us which parts of the key space the log
1893 * is authoritative for. During replay, if a key in the subvolume
1894 * directory is in a logged range item, but not actually in the log,
1895 * that means it was deleted from the directory before the fsync
1896 * and should be removed.
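/*
 * Illustrative example (hypothetical numbers): a dir log item in the
 * log such as
 *
 *   key (256 DIR_LOG_INDEX 2) dir log end 10
 *
 * says the log is authoritative for index offsets 2..10 of directory
 * 256.  Any DIR_INDEX item in that range that exists in the subvolume
 * but has no matching entry in the log was deleted before the fsync
 * and must be removed during replay.
 */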
1898 static noinline int find_dir_range(struct btrfs_root *root,
1899 struct btrfs_path *path,
1900 u64 dirid, int key_type,
1901 u64 *start_ret, u64 *end_ret)
1903 struct btrfs_key key;
1904 u64 found_end;
1905 struct btrfs_dir_log_item *item;
1906 int ret;
1907 int nritems;
1909 if (*start_ret == (u64)-1)
1910 return 1;
1912 key.objectid = dirid;
1913 key.type = key_type;
1914 key.offset = *start_ret;
1916 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1917 if (ret < 0)
1918 goto out;
1919 if (ret > 0) {
1920 if (path->slots[0] == 0)
1921 goto out;
1922 path->slots[0]--;
1924 if (ret != 0)
1925 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1927 if (key.type != key_type || key.objectid != dirid) {
1928 ret = 1;
1929 goto next;
1931 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1932 struct btrfs_dir_log_item);
1933 found_end = btrfs_dir_log_end(path->nodes[0], item);
1935 if (*start_ret >= key.offset && *start_ret <= found_end) {
1936 ret = 0;
1937 *start_ret = key.offset;
1938 *end_ret = found_end;
1939 goto out;
1941 ret = 1;
1942 next:
1943 /* check the next slot in the tree to see if it is a valid item */
1944 nritems = btrfs_header_nritems(path->nodes[0]);
1945 path->slots[0]++;
1946 if (path->slots[0] >= nritems) {
1947 ret = btrfs_next_leaf(root, path);
1948 if (ret)
1949 goto out;
1952 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1954 if (key.type != key_type || key.objectid != dirid) {
1955 ret = 1;
1956 goto out;
1958 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1959 struct btrfs_dir_log_item);
1960 found_end = btrfs_dir_log_end(path->nodes[0], item);
1961 *start_ret = key.offset;
1962 *end_ret = found_end;
1963 ret = 0;
1964 out:
1965 btrfs_release_path(path);
1966 return ret;
1970 * this looks for a given directory item in the log. If the directory
1971 * item is not in the log, the item is removed and the inode it points
1972 * to is unlinked
1974 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1975 struct btrfs_root *root,
1976 struct btrfs_root *log,
1977 struct btrfs_path *path,
1978 struct btrfs_path *log_path,
1979 struct inode *dir,
1980 struct btrfs_key *dir_key)
1982 int ret;
1983 struct extent_buffer *eb;
1984 int slot;
1985 u32 item_size;
1986 struct btrfs_dir_item *di;
1987 struct btrfs_dir_item *log_di;
1988 int name_len;
1989 unsigned long ptr;
1990 unsigned long ptr_end;
1991 char *name;
1992 struct inode *inode;
1993 struct btrfs_key location;
1995 again:
1996 eb = path->nodes[0];
1997 slot = path->slots[0];
1998 item_size = btrfs_item_size_nr(eb, slot);
1999 ptr = btrfs_item_ptr_offset(eb, slot);
2000 ptr_end = ptr + item_size;
2001 while (ptr < ptr_end) {
2002 di = (struct btrfs_dir_item *)ptr;
2003 if (verify_dir_item(root, eb, di)) {
2004 ret = -EIO;
2005 goto out;
2008 name_len = btrfs_dir_name_len(eb, di);
2009 name = kmalloc(name_len, GFP_NOFS);
2010 if (!name) {
2011 ret = -ENOMEM;
2012 goto out;
2014 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2015 name_len);
2016 log_di = NULL;
2017 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2018 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2019 dir_key->objectid,
2020 name, name_len, 0);
2021 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2022 log_di = btrfs_lookup_dir_index_item(trans, log,
2023 log_path,
2024 dir_key->objectid,
2025 dir_key->offset,
2026 name, name_len, 0);
2028 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2029 btrfs_dir_item_key_to_cpu(eb, di, &location);
2030 btrfs_release_path(path);
2031 btrfs_release_path(log_path);
2032 inode = read_one_inode(root, location.objectid);
2033 if (!inode) {
2034 kfree(name);
2035 return -EIO;
2038 ret = link_to_fixup_dir(trans, root,
2039 path, location.objectid);
2040 if (ret) {
2041 kfree(name);
2042 iput(inode);
2043 goto out;
2046 inc_nlink(inode);
2047 ret = btrfs_unlink_inode(trans, root, dir, inode,
2048 name, name_len);
2049 if (!ret)
2050 ret = btrfs_run_delayed_items(trans, root);
2051 kfree(name);
2052 iput(inode);
2053 if (ret)
2054 goto out;
2056 /* there might still be more names under this key
2057 * check and repeat if required
2059 ret = btrfs_search_slot(NULL, root, dir_key, path,
2060 0, 0);
2061 if (ret == 0)
2062 goto again;
2063 ret = 0;
2064 goto out;
2065 } else if (IS_ERR(log_di)) {
2066 kfree(name);
2067 return PTR_ERR(log_di);
2069 btrfs_release_path(log_path);
2070 kfree(name);
2072 ptr = (unsigned long)(di + 1);
2073 ptr += name_len;
2075 ret = 0;
2076 out:
2077 btrfs_release_path(path);
2078 btrfs_release_path(log_path);
2079 return ret;
2082 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2083 struct btrfs_root *root,
2084 struct btrfs_root *log,
2085 struct btrfs_path *path,
2086 const u64 ino)
2088 struct btrfs_key search_key;
2089 struct btrfs_path *log_path;
2090 int i;
2091 int nritems;
2092 int ret;
2094 log_path = btrfs_alloc_path();
2095 if (!log_path)
2096 return -ENOMEM;
2098 search_key.objectid = ino;
2099 search_key.type = BTRFS_XATTR_ITEM_KEY;
2100 search_key.offset = 0;
2101 again:
2102 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2103 if (ret < 0)
2104 goto out;
2105 process_leaf:
2106 nritems = btrfs_header_nritems(path->nodes[0]);
2107 for (i = path->slots[0]; i < nritems; i++) {
2108 struct btrfs_key key;
2109 struct btrfs_dir_item *di;
2110 struct btrfs_dir_item *log_di;
2111 u32 total_size;
2112 u32 cur;
2114 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2115 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2116 ret = 0;
2117 goto out;
2120 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2121 total_size = btrfs_item_size_nr(path->nodes[0], i);
2122 cur = 0;
2123 while (cur < total_size) {
2124 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2125 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2126 u32 this_len = sizeof(*di) + name_len + data_len;
2127 char *name;
2129 name = kmalloc(name_len, GFP_NOFS);
2130 if (!name) {
2131 ret = -ENOMEM;
2132 goto out;
2134 read_extent_buffer(path->nodes[0], name,
2135 (unsigned long)(di + 1), name_len);
2137 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2138 name, name_len, 0);
2139 btrfs_release_path(log_path);
2140 if (!log_di) {
2141 /* Doesn't exist in log tree, so delete it. */
2142 btrfs_release_path(path);
2143 di = btrfs_lookup_xattr(trans, root, path, ino,
2144 name, name_len, -1);
2145 kfree(name);
2146 if (IS_ERR(di)) {
2147 ret = PTR_ERR(di);
2148 goto out;
2150 ASSERT(di);
2151 ret = btrfs_delete_one_dir_name(trans, root,
2152 path, di);
2153 if (ret)
2154 goto out;
2155 btrfs_release_path(path);
2156 search_key = key;
2157 goto again;
2159 kfree(name);
2160 if (IS_ERR(log_di)) {
2161 ret = PTR_ERR(log_di);
2162 goto out;
2164 cur += this_len;
2165 di = (struct btrfs_dir_item *)((char *)di + this_len);
2168 ret = btrfs_next_leaf(root, path);
2169 if (ret > 0)
2170 ret = 0;
2171 else if (ret == 0)
2172 goto process_leaf;
2173 out:
2174 btrfs_free_path(log_path);
2175 btrfs_release_path(path);
2176 return ret;
2181 * deletion replay happens before we copy any new directory items
2182 * out of the log or out of backreferences from inodes. It
2183 * scans the log to find ranges of keys that the log is authoritative for,
2184 * and then scans the directory to find items in those ranges that are
2185 * not present in the log.
2187 * Anything we don't find in the log is unlinked and removed from the
2188 * directory.
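/*
 * Worked example (offsets and names are made up for illustration): assume
 * the log contains a dir-log range item saying it is authoritative for
 * index offsets [0, 100] of this directory.  If the subvolume currently
 * holds entries at offset 10 ("a") and offset 20 ("b"), but the log only
 * contains the entry at offset 10, then "b" was deleted before the last
 * fsync and is unlinked here as part of replay.
 */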
2190 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2191 struct btrfs_root *root,
2192 struct btrfs_root *log,
2193 struct btrfs_path *path,
2194 u64 dirid, int del_all)
2196 u64 range_start;
2197 u64 range_end;
2198 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2199 int ret = 0;
2200 struct btrfs_key dir_key;
2201 struct btrfs_key found_key;
2202 struct btrfs_path *log_path;
2203 struct inode *dir;
2205 dir_key.objectid = dirid;
2206 dir_key.type = BTRFS_DIR_ITEM_KEY;
2207 log_path = btrfs_alloc_path();
2208 if (!log_path)
2209 return -ENOMEM;
2211 dir = read_one_inode(root, dirid);
2212 /* it isn't an error if the inode isn't there, that can happen
2213 * because we replay the deletes before we copy in the inode item
2214 * from the log
2216 if (!dir) {
2217 btrfs_free_path(log_path);
2218 return 0;
2220 again:
2221 range_start = 0;
2222 range_end = 0;
2223 while (1) {
2224 if (del_all)
2225 range_end = (u64)-1;
2226 else {
2227 ret = find_dir_range(log, path, dirid, key_type,
2228 &range_start, &range_end);
2229 if (ret != 0)
2230 break;
2233 dir_key.offset = range_start;
2234 while (1) {
2235 int nritems;
2236 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2237 0, 0);
2238 if (ret < 0)
2239 goto out;
2241 nritems = btrfs_header_nritems(path->nodes[0]);
2242 if (path->slots[0] >= nritems) {
2243 ret = btrfs_next_leaf(root, path);
2244 if (ret == 1)
2245 break;
2246 else if (ret < 0)
2247 goto out;
2249 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2250 path->slots[0]);
2251 if (found_key.objectid != dirid ||
2252 found_key.type != dir_key.type)
2253 goto next_type;
2255 if (found_key.offset > range_end)
2256 break;
2258 ret = check_item_in_log(trans, root, log, path,
2259 log_path, dir,
2260 &found_key);
2261 if (ret)
2262 goto out;
2263 if (found_key.offset == (u64)-1)
2264 break;
2265 dir_key.offset = found_key.offset + 1;
2267 btrfs_release_path(path);
2268 if (range_end == (u64)-1)
2269 break;
2270 range_start = range_end + 1;
2273 next_type:
2274 ret = 0;
2275 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2276 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2277 dir_key.type = BTRFS_DIR_INDEX_KEY;
2278 btrfs_release_path(path);
2279 goto again;
2281 out:
2282 btrfs_release_path(path);
2283 btrfs_free_path(log_path);
2284 iput(dir);
2285 return ret;
2289 * the process_func used to replay items from the log tree. This
2290 * gets called in two different stages. The first stage just looks
2291 * for inodes and makes sure they are all copied into the subvolume.
2293 * The second stage copies all the other item types from the log into
2294 * the subvolume. The two stage approach is slower, but gets rid of
2295 * lots of complexity around inodes referencing other inodes that exist
2296 * only in the log (references come from either directory items or inode
2297 * back refs).
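/*
 * Sketch of how the walk stages map onto item types (derived from the
 * key.type checks in the function below):
 *
 *	LOG_WALK_REPLAY_INODES:     BTRFS_INODE_ITEM_KEY only
 *	LOG_WALK_REPLAY_DIR_INDEX:  BTRFS_DIR_INDEX_KEY
 *	LOG_WALK_REPLAY_ALL:        xattrs, inode refs/extrefs, file
 *	                            extents and dir items
 */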
2299 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2300 struct walk_control *wc, u64 gen)
2302 int nritems;
2303 struct btrfs_path *path;
2304 struct btrfs_root *root = wc->replay_dest;
2305 struct btrfs_key key;
2306 int level;
2307 int i;
2308 int ret;
2310 ret = btrfs_read_buffer(eb, gen);
2311 if (ret)
2312 return ret;
2314 level = btrfs_header_level(eb);
2316 if (level != 0)
2317 return 0;
2319 path = btrfs_alloc_path();
2320 if (!path)
2321 return -ENOMEM;
2323 nritems = btrfs_header_nritems(eb);
2324 for (i = 0; i < nritems; i++) {
2325 btrfs_item_key_to_cpu(eb, &key, i);
2327 /* inode keys are done during the first stage */
2328 if (key.type == BTRFS_INODE_ITEM_KEY &&
2329 wc->stage == LOG_WALK_REPLAY_INODES) {
2330 struct btrfs_inode_item *inode_item;
2331 u32 mode;
2333 inode_item = btrfs_item_ptr(eb, i,
2334 struct btrfs_inode_item);
2335 ret = replay_xattr_deletes(wc->trans, root, log,
2336 path, key.objectid);
2337 if (ret)
2338 break;
2339 mode = btrfs_inode_mode(eb, inode_item);
2340 if (S_ISDIR(mode)) {
2341 ret = replay_dir_deletes(wc->trans,
2342 root, log, path, key.objectid, 0);
2343 if (ret)
2344 break;
2346 ret = overwrite_item(wc->trans, root, path,
2347 eb, i, &key);
2348 if (ret)
2349 break;
2351 /* for regular files, make sure corresponding
2352 * orphan item exists. Extents past the new EOF
2353 * will be truncated later by orphan cleanup.
2355 if (S_ISREG(mode)) {
2356 ret = insert_orphan_item(wc->trans, root,
2357 key.objectid);
2358 if (ret)
2359 break;
2362 ret = link_to_fixup_dir(wc->trans, root,
2363 path, key.objectid);
2364 if (ret)
2365 break;
2368 if (key.type == BTRFS_DIR_INDEX_KEY &&
2369 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2370 ret = replay_one_dir_item(wc->trans, root, path,
2371 eb, i, &key);
2372 if (ret)
2373 break;
2376 if (wc->stage < LOG_WALK_REPLAY_ALL)
2377 continue;
2379 /* these keys are simply copied */
2380 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2381 ret = overwrite_item(wc->trans, root, path,
2382 eb, i, &key);
2383 if (ret)
2384 break;
2385 } else if (key.type == BTRFS_INODE_REF_KEY ||
2386 key.type == BTRFS_INODE_EXTREF_KEY) {
2387 ret = add_inode_ref(wc->trans, root, log, path,
2388 eb, i, &key);
2389 if (ret && ret != -ENOENT)
2390 break;
2391 ret = 0;
2392 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2393 ret = replay_one_extent(wc->trans, root, path,
2394 eb, i, &key);
2395 if (ret)
2396 break;
2397 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2398 ret = replay_one_dir_item(wc->trans, root, path,
2399 eb, i, &key);
2400 if (ret)
2401 break;
2404 btrfs_free_path(path);
2405 return ret;
2408 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2409 struct btrfs_root *root,
2410 struct btrfs_path *path, int *level,
2411 struct walk_control *wc)
2413 u64 root_owner;
2414 u64 bytenr;
2415 u64 ptr_gen;
2416 struct extent_buffer *next;
2417 struct extent_buffer *cur;
2418 struct extent_buffer *parent;
2419 u32 blocksize;
2420 int ret = 0;
2422 WARN_ON(*level < 0);
2423 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2425 while (*level > 0) {
2426 WARN_ON(*level < 0);
2427 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2428 cur = path->nodes[*level];
2430 WARN_ON(btrfs_header_level(cur) != *level);
2432 if (path->slots[*level] >=
2433 btrfs_header_nritems(cur))
2434 break;
2436 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2437 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2438 blocksize = root->nodesize;
2440 parent = path->nodes[*level];
2441 root_owner = btrfs_header_owner(parent);
2443 next = btrfs_find_create_tree_block(root, bytenr);
2444 if (IS_ERR(next))
2445 return PTR_ERR(next);
2447 if (*level == 1) {
2448 ret = wc->process_func(root, next, wc, ptr_gen);
2449 if (ret) {
2450 free_extent_buffer(next);
2451 return ret;
2454 path->slots[*level]++;
2455 if (wc->free) {
2456 ret = btrfs_read_buffer(next, ptr_gen);
2457 if (ret) {
2458 free_extent_buffer(next);
2459 return ret;
2462 if (trans) {
2463 btrfs_tree_lock(next);
2464 btrfs_set_lock_blocking(next);
2465 clean_tree_block(trans, root->fs_info,
2466 next);
2467 btrfs_wait_tree_block_writeback(next);
2468 btrfs_tree_unlock(next);
2469 } else {
2470 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2471 clear_extent_buffer_dirty(next);
2474 WARN_ON(root_owner !=
2475 BTRFS_TREE_LOG_OBJECTID);
2476 ret = btrfs_free_and_pin_reserved_extent(root,
2477 bytenr, blocksize);
2478 if (ret) {
2479 free_extent_buffer(next);
2480 return ret;
2483 free_extent_buffer(next);
2484 continue;
2486 ret = btrfs_read_buffer(next, ptr_gen);
2487 if (ret) {
2488 free_extent_buffer(next);
2489 return ret;
2492 WARN_ON(*level <= 0);
2493 if (path->nodes[*level-1])
2494 free_extent_buffer(path->nodes[*level-1]);
2495 path->nodes[*level-1] = next;
2496 *level = btrfs_header_level(next);
2497 path->slots[*level] = 0;
2498 cond_resched();
2500 WARN_ON(*level < 0);
2501 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2503 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2505 cond_resched();
2506 return 0;
2509 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2510 struct btrfs_root *root,
2511 struct btrfs_path *path, int *level,
2512 struct walk_control *wc)
2514 u64 root_owner;
2515 int i;
2516 int slot;
2517 int ret;
2519 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2520 slot = path->slots[i];
2521 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2522 path->slots[i]++;
2523 *level = i;
2524 WARN_ON(*level == 0);
2525 return 0;
2526 } else {
2527 struct extent_buffer *parent;
2528 if (path->nodes[*level] == root->node)
2529 parent = path->nodes[*level];
2530 else
2531 parent = path->nodes[*level + 1];
2533 root_owner = btrfs_header_owner(parent);
2534 ret = wc->process_func(root, path->nodes[*level], wc,
2535 btrfs_header_generation(path->nodes[*level]));
2536 if (ret)
2537 return ret;
2539 if (wc->free) {
2540 struct extent_buffer *next;
2542 next = path->nodes[*level];
2544 if (trans) {
2545 btrfs_tree_lock(next);
2546 btrfs_set_lock_blocking(next);
2547 clean_tree_block(trans, root->fs_info,
2548 next);
2549 btrfs_wait_tree_block_writeback(next);
2550 btrfs_tree_unlock(next);
2551 } else {
2552 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2553 clear_extent_buffer_dirty(next);
2556 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2557 ret = btrfs_free_and_pin_reserved_extent(root,
2558 path->nodes[*level]->start,
2559 path->nodes[*level]->len);
2560 if (ret)
2561 return ret;
2563 free_extent_buffer(path->nodes[*level]);
2564 path->nodes[*level] = NULL;
2565 *level = i + 1;
2568 return 1;
2572 * drop the reference count on the tree rooted at 'log'. This traverses
2573 * the tree freeing any blocks that have a ref count of zero after being
2574 * decremented.
2576 static int walk_log_tree(struct btrfs_trans_handle *trans,
2577 struct btrfs_root *log, struct walk_control *wc)
2579 int ret = 0;
2580 int wret;
2581 int level;
2582 struct btrfs_path *path;
2583 int orig_level;
2585 path = btrfs_alloc_path();
2586 if (!path)
2587 return -ENOMEM;
2589 level = btrfs_header_level(log->node);
2590 orig_level = level;
2591 path->nodes[level] = log->node;
2592 extent_buffer_get(log->node);
2593 path->slots[level] = 0;
2595 while (1) {
2596 wret = walk_down_log_tree(trans, log, path, &level, wc);
2597 if (wret > 0)
2598 break;
2599 if (wret < 0) {
2600 ret = wret;
2601 goto out;
2604 wret = walk_up_log_tree(trans, log, path, &level, wc);
2605 if (wret > 0)
2606 break;
2607 if (wret < 0) {
2608 ret = wret;
2609 goto out;
2613 /* was the root node processed? if not, catch it here */
2614 if (path->nodes[orig_level]) {
2615 ret = wc->process_func(log, path->nodes[orig_level], wc,
2616 btrfs_header_generation(path->nodes[orig_level]));
2617 if (ret)
2618 goto out;
2619 if (wc->free) {
2620 struct extent_buffer *next;
2622 next = path->nodes[orig_level];
2624 if (trans) {
2625 btrfs_tree_lock(next);
2626 btrfs_set_lock_blocking(next);
2627 clean_tree_block(trans, log->fs_info, next);
2628 btrfs_wait_tree_block_writeback(next);
2629 btrfs_tree_unlock(next);
2630 } else {
2631 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2632 clear_extent_buffer_dirty(next);
2635 WARN_ON(log->root_key.objectid !=
2636 BTRFS_TREE_LOG_OBJECTID);
2637 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2638 next->len);
2639 if (ret)
2640 goto out;
2644 out:
2645 btrfs_free_path(path);
2646 return ret;
2650 * helper function to update the item for a given subvolume's log root
2651 * in the tree of log roots
2653 static int update_log_root(struct btrfs_trans_handle *trans,
2654 struct btrfs_root *log)
2656 int ret;
2658 if (log->log_transid == 1) {
2659 /* insert root item on the first sync */
2660 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2661 &log->root_key, &log->root_item);
2662 } else {
2663 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2664 &log->root_key, &log->root_item);
2666 return ret;
2669 static void wait_log_commit(struct btrfs_root *root, int transid)
2671 DEFINE_WAIT(wait);
2672 int index = transid % 2;
2675 * we only allow two pending log transactions at a time,
2676 * so we know that if ours is more than 2 older than the
2677 * current transaction, we're done
2679 do {
2680 prepare_to_wait(&root->log_commit_wait[index],
2681 &wait, TASK_UNINTERRUPTIBLE);
2682 mutex_unlock(&root->log_mutex);
2684 if (root->log_transid_committed < transid &&
2685 atomic_read(&root->log_commit[index]))
2686 schedule();
2688 finish_wait(&root->log_commit_wait[index], &wait);
2689 mutex_lock(&root->log_mutex);
2690 } while (root->log_transid_committed < transid &&
2691 atomic_read(&root->log_commit[index]));
2694 static void wait_for_writer(struct btrfs_root *root)
2696 DEFINE_WAIT(wait);
2698 while (atomic_read(&root->log_writers)) {
2699 prepare_to_wait(&root->log_writer_wait,
2700 &wait, TASK_UNINTERRUPTIBLE);
2701 mutex_unlock(&root->log_mutex);
2702 if (atomic_read(&root->log_writers))
2703 schedule();
2704 finish_wait(&root->log_writer_wait, &wait);
2705 mutex_lock(&root->log_mutex);
2709 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2710 struct btrfs_log_ctx *ctx)
2712 if (!ctx)
2713 return;
2715 mutex_lock(&root->log_mutex);
2716 list_del_init(&ctx->list);
2717 mutex_unlock(&root->log_mutex);
2721 * Invoked in log mutex context; otherwise be sure there is no other task
2722 * which can access the list.
2724 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2725 int index, int error)
2727 struct btrfs_log_ctx *ctx;
2728 struct btrfs_log_ctx *safe;
2730 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2731 list_del_init(&ctx->list);
2732 ctx->log_ret = error;
2735 INIT_LIST_HEAD(&root->log_ctxs[index]);
2739 * btrfs_sync_log sends a given tree log down to the disk and
2740 * updates the super blocks to record it. When this call is done,
2741 * you know that any inodes previously logged are safely on disk only
2742 * if it returns 0.
2744 * Any other return value means you need to call btrfs_commit_transaction.
2745 * Some of the edge cases for fsyncing directories that have had unlinks
2746 * or renames done in the past mean that sometimes the only safe
2747 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2748 * that has happened.
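/*
 * Illustrative caller-side pattern (a sketch only, with error handling
 * stripped down; -EAGAIN is the "do a full commit instead" case described
 * above):
 *
 *	ret = btrfs_sync_log(trans, root, ctx);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);
 */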
2750 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2751 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2753 int index1;
2754 int index2;
2755 int mark;
2756 int ret;
2757 struct btrfs_root *log = root->log_root;
2758 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2759 int log_transid = 0;
2760 struct btrfs_log_ctx root_log_ctx;
2761 struct blk_plug plug;
2763 mutex_lock(&root->log_mutex);
2764 log_transid = ctx->log_transid;
2765 if (root->log_transid_committed >= log_transid) {
2766 mutex_unlock(&root->log_mutex);
2767 return ctx->log_ret;
2770 index1 = log_transid % 2;
2771 if (atomic_read(&root->log_commit[index1])) {
2772 wait_log_commit(root, log_transid);
2773 mutex_unlock(&root->log_mutex);
2774 return ctx->log_ret;
2776 ASSERT(log_transid == root->log_transid);
2777 atomic_set(&root->log_commit[index1], 1);
2779 /* wait for previous tree log sync to complete */
2780 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2781 wait_log_commit(root, log_transid - 1);
2783 while (1) {
2784 int batch = atomic_read(&root->log_batch);
2785 /* when we're on an ssd, just kick the log commit out */
2786 if (!btrfs_test_opt(root->fs_info, SSD) &&
2787 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2788 mutex_unlock(&root->log_mutex);
2789 schedule_timeout_uninterruptible(1);
2790 mutex_lock(&root->log_mutex);
2792 wait_for_writer(root);
2793 if (batch == atomic_read(&root->log_batch))
2794 break;
2797 /* bail out if we need to do a full commit */
2798 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2799 ret = -EAGAIN;
2800 btrfs_free_logged_extents(log, log_transid);
2801 mutex_unlock(&root->log_mutex);
2802 goto out;
2805 if (log_transid % 2 == 0)
2806 mark = EXTENT_DIRTY;
2807 else
2808 mark = EXTENT_NEW;
2810 /* we start IO on all the marked extents here, but we don't actually
2811 * wait for them until later.
2813 blk_start_plug(&plug);
2814 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2815 if (ret) {
2816 blk_finish_plug(&plug);
2817 btrfs_abort_transaction(trans, ret);
2818 btrfs_free_logged_extents(log, log_transid);
2819 btrfs_set_log_full_commit(root->fs_info, trans);
2820 mutex_unlock(&root->log_mutex);
2821 goto out;
2824 btrfs_set_root_node(&log->root_item, log->node);
2826 root->log_transid++;
2827 log->log_transid = root->log_transid;
2828 root->log_start_pid = 0;
2830 * IO has been started, blocks of the log tree have the WRITTEN flag set
2831 * in their headers. New modifications of the log will be written to
2832 * new positions, so it's safe to allow log writers to go in.
2834 mutex_unlock(&root->log_mutex);
2836 btrfs_init_log_ctx(&root_log_ctx, NULL);
2838 mutex_lock(&log_root_tree->log_mutex);
2839 atomic_inc(&log_root_tree->log_batch);
2840 atomic_inc(&log_root_tree->log_writers);
2842 index2 = log_root_tree->log_transid % 2;
2843 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2844 root_log_ctx.log_transid = log_root_tree->log_transid;
2846 mutex_unlock(&log_root_tree->log_mutex);
2848 ret = update_log_root(trans, log);
2850 mutex_lock(&log_root_tree->log_mutex);
2851 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2853 * Implicit memory barrier after atomic_dec_and_test
2855 if (waitqueue_active(&log_root_tree->log_writer_wait))
2856 wake_up(&log_root_tree->log_writer_wait);
2859 if (ret) {
2860 if (!list_empty(&root_log_ctx.list))
2861 list_del_init(&root_log_ctx.list);
2863 blk_finish_plug(&plug);
2864 btrfs_set_log_full_commit(root->fs_info, trans);
2866 if (ret != -ENOSPC) {
2867 btrfs_abort_transaction(trans, ret);
2868 mutex_unlock(&log_root_tree->log_mutex);
2869 goto out;
2871 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2872 btrfs_free_logged_extents(log, log_transid);
2873 mutex_unlock(&log_root_tree->log_mutex);
2874 ret = -EAGAIN;
2875 goto out;
2878 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2879 blk_finish_plug(&plug);
2880 list_del_init(&root_log_ctx.list);
2881 mutex_unlock(&log_root_tree->log_mutex);
2882 ret = root_log_ctx.log_ret;
2883 goto out;
2886 index2 = root_log_ctx.log_transid % 2;
2887 if (atomic_read(&log_root_tree->log_commit[index2])) {
2888 blk_finish_plug(&plug);
2889 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2890 mark);
2891 btrfs_wait_logged_extents(trans, log, log_transid);
2892 wait_log_commit(log_root_tree,
2893 root_log_ctx.log_transid);
2894 mutex_unlock(&log_root_tree->log_mutex);
2895 if (!ret)
2896 ret = root_log_ctx.log_ret;
2897 goto out;
2899 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2900 atomic_set(&log_root_tree->log_commit[index2], 1);
2902 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2903 wait_log_commit(log_root_tree,
2904 root_log_ctx.log_transid - 1);
2907 wait_for_writer(log_root_tree);
2910 * now that we've moved on to the tree of log tree roots,
2911 * check the full commit flag again
2913 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2914 blk_finish_plug(&plug);
2915 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2916 btrfs_free_logged_extents(log, log_transid);
2917 mutex_unlock(&log_root_tree->log_mutex);
2918 ret = -EAGAIN;
2919 goto out_wake_log_root;
2922 ret = btrfs_write_marked_extents(log_root_tree,
2923 &log_root_tree->dirty_log_pages,
2924 EXTENT_DIRTY | EXTENT_NEW);
2925 blk_finish_plug(&plug);
2926 if (ret) {
2927 btrfs_set_log_full_commit(root->fs_info, trans);
2928 btrfs_abort_transaction(trans, ret);
2929 btrfs_free_logged_extents(log, log_transid);
2930 mutex_unlock(&log_root_tree->log_mutex);
2931 goto out_wake_log_root;
2933 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2934 if (!ret)
2935 ret = btrfs_wait_marked_extents(log_root_tree,
2936 &log_root_tree->dirty_log_pages,
2937 EXTENT_NEW | EXTENT_DIRTY);
2938 if (ret) {
2939 btrfs_set_log_full_commit(root->fs_info, trans);
2940 btrfs_free_logged_extents(log, log_transid);
2941 mutex_unlock(&log_root_tree->log_mutex);
2942 goto out_wake_log_root;
2944 btrfs_wait_logged_extents(trans, log, log_transid);
2946 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2947 log_root_tree->node->start);
2948 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2949 btrfs_header_level(log_root_tree->node));
2951 log_root_tree->log_transid++;
2952 mutex_unlock(&log_root_tree->log_mutex);
2955 * nobody else is going to jump in and write the ctree
2956 * super here because the log_commit atomic below is protecting
2957 * us. We must be called with a transaction handle pinning
2958 * the running transaction open, so a full commit can't hop
2959 * in and cause problems either.
2961 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2962 if (ret) {
2963 btrfs_set_log_full_commit(root->fs_info, trans);
2964 btrfs_abort_transaction(trans, ret);
2965 goto out_wake_log_root;
2968 mutex_lock(&root->log_mutex);
2969 if (root->last_log_commit < log_transid)
2970 root->last_log_commit = log_transid;
2971 mutex_unlock(&root->log_mutex);
2973 out_wake_log_root:
2974 mutex_lock(&log_root_tree->log_mutex);
2975 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2977 log_root_tree->log_transid_committed++;
2978 atomic_set(&log_root_tree->log_commit[index2], 0);
2979 mutex_unlock(&log_root_tree->log_mutex);
2982 * The barrier before waitqueue_active is needed so all the updates
2983 * above are seen by the woken threads. It might not be necessary, but
2984 * proving that seems to be hard.
2986 smp_mb();
2987 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2988 wake_up(&log_root_tree->log_commit_wait[index2]);
2989 out:
2990 mutex_lock(&root->log_mutex);
2991 btrfs_remove_all_log_ctxs(root, index1, ret);
2992 root->log_transid_committed++;
2993 atomic_set(&root->log_commit[index1], 0);
2994 mutex_unlock(&root->log_mutex);
2997 * The barrier before waitqueue_active is needed so all the updates
2998 * above are seen by the woken threads. It might not be necessary, but
2999 * proving that seems to be hard.
3001 smp_mb();
3002 if (waitqueue_active(&root->log_commit_wait[index1]))
3003 wake_up(&root->log_commit_wait[index1]);
3004 return ret;
3007 static void free_log_tree(struct btrfs_trans_handle *trans,
3008 struct btrfs_root *log)
3010 int ret;
3011 u64 start;
3012 u64 end;
3013 struct walk_control wc = {
3014 .free = 1,
3015 .process_func = process_one_buffer
3018 ret = walk_log_tree(trans, log, &wc);
3019 if (ret) {
3020 if (trans)
3021 btrfs_abort_transaction(trans, ret);
3022 else
3023 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3026 while (1) {
3027 ret = find_first_extent_bit(&log->dirty_log_pages,
3028 0, &start, &end,
3029 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
3030 NULL);
3031 if (ret)
3032 break;
3034 clear_extent_bits(&log->dirty_log_pages, start, end,
3035 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3039 * We may have short-circuited the log tree with the full commit logic
3040 * and left ordered extents on our list, so clear these out to keep us
3041 * from leaking inodes and memory.
3043 btrfs_free_logged_extents(log, 0);
3044 btrfs_free_logged_extents(log, 1);
3046 free_extent_buffer(log->node);
3047 kfree(log);
3051 * free all the extents used by the tree log. This should be called
3052 * at commit time of the full transaction
3054 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3056 if (root->log_root) {
3057 free_log_tree(trans, root->log_root);
3058 root->log_root = NULL;
3060 return 0;
3063 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3064 struct btrfs_fs_info *fs_info)
3066 if (fs_info->log_root_tree) {
3067 free_log_tree(trans, fs_info->log_root_tree);
3068 fs_info->log_root_tree = NULL;
3070 return 0;
3074 * If both a file and directory are logged, and unlinks or renames are
3075 * mixed in, we have a few interesting corners:
3077 * create file X in dir Y
3078 * link file X to X.link in dir Y
3079 * fsync file X
3080 * unlink file X but leave X.link
3081 * fsync dir Y
3083 * After a crash we would expect only X.link to exist. But file X
3084 * didn't get fsync'd again so the log has back refs for X and X.link.
3086 * We solve this by removing directory entries and inode backrefs from the
3087 * log when a file that was logged in the current transaction is
3088 * unlinked. Any later fsync will include the updated log entries, and
3089 * we'll be able to reconstruct the proper directory items from backrefs.
3091 * This optimization allows us to avoid relogging the entire inode
3092 * or the entire directory.
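/*
 * Concretely (summarizing the function below): both the DIR_ITEM looked up
 * by name and the DIR_INDEX looked up by 'index' are deleted from the log,
 * and the directory's i_size recorded in the log is reduced by the removed
 * name lengths so replay computes a consistent directory size.
 */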
3094 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3095 struct btrfs_root *root,
3096 const char *name, int name_len,
3097 struct inode *dir, u64 index)
3099 struct btrfs_root *log;
3100 struct btrfs_dir_item *di;
3101 struct btrfs_path *path;
3102 int ret;
3103 int err = 0;
3104 int bytes_del = 0;
3105 u64 dir_ino = btrfs_ino(dir);
3107 if (BTRFS_I(dir)->logged_trans < trans->transid)
3108 return 0;
3110 ret = join_running_log_trans(root);
3111 if (ret)
3112 return 0;
3114 mutex_lock(&BTRFS_I(dir)->log_mutex);
3116 log = root->log_root;
3117 path = btrfs_alloc_path();
3118 if (!path) {
3119 err = -ENOMEM;
3120 goto out_unlock;
3123 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3124 name, name_len, -1);
3125 if (IS_ERR(di)) {
3126 err = PTR_ERR(di);
3127 goto fail;
3129 if (di) {
3130 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3131 bytes_del += name_len;
3132 if (ret) {
3133 err = ret;
3134 goto fail;
3137 btrfs_release_path(path);
3138 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3139 index, name, name_len, -1);
3140 if (IS_ERR(di)) {
3141 err = PTR_ERR(di);
3142 goto fail;
3144 if (di) {
3145 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3146 bytes_del += name_len;
3147 if (ret) {
3148 err = ret;
3149 goto fail;
3153 /* update the directory size in the log to reflect the names
3154 * we have removed
3156 if (bytes_del) {
3157 struct btrfs_key key;
3159 key.objectid = dir_ino;
3160 key.offset = 0;
3161 key.type = BTRFS_INODE_ITEM_KEY;
3162 btrfs_release_path(path);
3164 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3165 if (ret < 0) {
3166 err = ret;
3167 goto fail;
3169 if (ret == 0) {
3170 struct btrfs_inode_item *item;
3171 u64 i_size;
3173 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3174 struct btrfs_inode_item);
3175 i_size = btrfs_inode_size(path->nodes[0], item);
3176 if (i_size > bytes_del)
3177 i_size -= bytes_del;
3178 else
3179 i_size = 0;
3180 btrfs_set_inode_size(path->nodes[0], item, i_size);
3181 btrfs_mark_buffer_dirty(path->nodes[0]);
3182 } else
3183 ret = 0;
3184 btrfs_release_path(path);
3186 fail:
3187 btrfs_free_path(path);
3188 out_unlock:
3189 mutex_unlock(&BTRFS_I(dir)->log_mutex);
3190 if (ret == -ENOSPC) {
3191 btrfs_set_log_full_commit(root->fs_info, trans);
3192 ret = 0;
3193 } else if (ret < 0)
3194 btrfs_abort_transaction(trans, ret);
3196 btrfs_end_log_trans(root);
3198 return err;
3201 /* see comments for btrfs_del_dir_entries_in_log */
3202 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3203 struct btrfs_root *root,
3204 const char *name, int name_len,
3205 struct inode *inode, u64 dirid)
3207 struct btrfs_root *log;
3208 u64 index;
3209 int ret;
3211 if (BTRFS_I(inode)->logged_trans < trans->transid)
3212 return 0;
3214 ret = join_running_log_trans(root);
3215 if (ret)
3216 return 0;
3217 log = root->log_root;
3218 mutex_lock(&BTRFS_I(inode)->log_mutex);
3220 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3221 dirid, &index);
3222 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3223 if (ret == -ENOSPC) {
3224 btrfs_set_log_full_commit(root->fs_info, trans);
3225 ret = 0;
3226 } else if (ret < 0 && ret != -ENOENT)
3227 btrfs_abort_transaction(trans, ret);
3228 btrfs_end_log_trans(root);
3230 return ret;
3234 * creates a range item in the log for 'dirid'. first_offset and
3235 * last_offset tell us which parts of the key space the log should
3236 * be considered authoritative for.
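/*
 * Example of the resulting item (offsets chosen only for illustration):
 * insert_dir_log_key(..., BTRFS_DIR_INDEX_KEY, dirid, 2, 10) inserts
 *
 *	key  = (dirid, BTRFS_DIR_LOG_INDEX_KEY, 2)
 *	item = { dir_log_end = 10 }
 *
 * telling replay that the log covers index offsets 2 through 10 of 'dirid'.
 */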
3238 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3239 struct btrfs_root *log,
3240 struct btrfs_path *path,
3241 int key_type, u64 dirid,
3242 u64 first_offset, u64 last_offset)
3244 int ret;
3245 struct btrfs_key key;
3246 struct btrfs_dir_log_item *item;
3248 key.objectid = dirid;
3249 key.offset = first_offset;
3250 if (key_type == BTRFS_DIR_ITEM_KEY)
3251 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3252 else
3253 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3254 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3255 if (ret)
3256 return ret;
3258 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3259 struct btrfs_dir_log_item);
3260 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3261 btrfs_mark_buffer_dirty(path->nodes[0]);
3262 btrfs_release_path(path);
3263 return 0;
3267 * log all the items included in the current transaction for a given
3268 * directory. This also creates the range items in the log tree required
3269 * to replay anything deleted before the fsync
3271 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3272 struct btrfs_root *root, struct inode *inode,
3273 struct btrfs_path *path,
3274 struct btrfs_path *dst_path, int key_type,
3275 struct btrfs_log_ctx *ctx,
3276 u64 min_offset, u64 *last_offset_ret)
3278 struct btrfs_key min_key;
3279 struct btrfs_root *log = root->log_root;
3280 struct extent_buffer *src;
3281 int err = 0;
3282 int ret;
3283 int i;
3284 int nritems;
3285 u64 first_offset = min_offset;
3286 u64 last_offset = (u64)-1;
3287 u64 ino = btrfs_ino(inode);
3289 log = root->log_root;
3291 min_key.objectid = ino;
3292 min_key.type = key_type;
3293 min_key.offset = min_offset;
3295 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3298 * we didn't find anything from this transaction, see if there
3299 * is anything at all
3301 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3302 min_key.objectid = ino;
3303 min_key.type = key_type;
3304 min_key.offset = (u64)-1;
3305 btrfs_release_path(path);
3306 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3307 if (ret < 0) {
3308 btrfs_release_path(path);
3309 return ret;
3311 ret = btrfs_previous_item(root, path, ino, key_type);
3313 /* if ret == 0 there are items for this type,
3314 * create a range to tell us the last key of this type.
3315 * otherwise, there are no items in this directory after
3316 * *min_offset, and we create a range to indicate that.
3318 if (ret == 0) {
3319 struct btrfs_key tmp;
3320 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3321 path->slots[0]);
3322 if (key_type == tmp.type)
3323 first_offset = max(min_offset, tmp.offset) + 1;
3325 goto done;
3328 /* go backward to find any previous key */
3329 ret = btrfs_previous_item(root, path, ino, key_type);
3330 if (ret == 0) {
3331 struct btrfs_key tmp;
3332 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3333 if (key_type == tmp.type) {
3334 first_offset = tmp.offset;
3335 ret = overwrite_item(trans, log, dst_path,
3336 path->nodes[0], path->slots[0],
3337 &tmp);
3338 if (ret) {
3339 err = ret;
3340 goto done;
3344 btrfs_release_path(path);
3347 * Find the first key from this transaction again. See the note for
3348 * log_new_dir_dentries, if we're logging a directory recursively we
3349 * won't be holding its i_mutex, which means we can modify the directory
3350 * while we're logging it. If we remove an entry between our first
3351 * search and this search we'll not find the key again and can just
3352 * bail.
3354 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3355 if (ret != 0)
3356 goto done;
3359 * we have a block from this transaction, log every item in it
3360 * from our directory
3362 while (1) {
3363 struct btrfs_key tmp;
3364 src = path->nodes[0];
3365 nritems = btrfs_header_nritems(src);
3366 for (i = path->slots[0]; i < nritems; i++) {
3367 struct btrfs_dir_item *di;
3369 btrfs_item_key_to_cpu(src, &min_key, i);
3371 if (min_key.objectid != ino || min_key.type != key_type)
3372 goto done;
3373 ret = overwrite_item(trans, log, dst_path, src, i,
3374 &min_key);
3375 if (ret) {
3376 err = ret;
3377 goto done;
3381 * We must make sure that when we log a directory entry,
3382 * the corresponding inode, after log replay, has a
3383 * matching link count. For example:
3385 * touch foo
3386 * mkdir mydir
3387 * sync
3388 * ln foo mydir/bar
3389 * xfs_io -c "fsync" mydir
3390 * <crash>
3391 * <mount fs and log replay>
3393 * Would result in an fsync log that, when replayed, would
3394 * leave our file inode with a link count of 1 even though
3395 * two directory entries point to the same inode.
3396 * After removing one of the names, it would not be
3397 * possible to remove the other name, which resulted
3398 * always in stale file handle errors, and would not
3399 * be possible to rmdir the parent directory, since
3400 * its i_size could never decrement to the value
3401 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3403 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3404 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3405 if (ctx &&
3406 (btrfs_dir_transid(src, di) == trans->transid ||
3407 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3408 tmp.type != BTRFS_ROOT_ITEM_KEY)
3409 ctx->log_new_dentries = true;
3411 path->slots[0] = nritems;
3414 * look ahead to the next item and see if it is also
3415 * from this directory and from this transaction
3417 ret = btrfs_next_leaf(root, path);
3418 if (ret) {
3419 if (ret == 1)
3420 last_offset = (u64)-1;
3421 else
3422 err = ret;
3423 goto done;
3425 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3426 if (tmp.objectid != ino || tmp.type != key_type) {
3427 last_offset = (u64)-1;
3428 goto done;
3430 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3431 ret = overwrite_item(trans, log, dst_path,
3432 path->nodes[0], path->slots[0],
3433 &tmp);
3434 if (ret)
3435 err = ret;
3436 else
3437 last_offset = tmp.offset;
3438 goto done;
3441 done:
3442 btrfs_release_path(path);
3443 btrfs_release_path(dst_path);
3445 if (err == 0) {
3446 *last_offset_ret = last_offset;
3448 * insert the log range keys to indicate where the log
3449 * is valid
3451 ret = insert_dir_log_key(trans, log, path, key_type,
3452 ino, first_offset, last_offset);
3453 if (ret)
3454 err = ret;
3456 return err;
3460 * logging directories is very similar to logging inodes. We find all the items
3461 * from the current transaction and write them to the log.
3463 * The recovery code scans the directory in the subvolume, and if it finds a
3464 * key in the range logged that is not present in the log tree, then it means
3465 * that dir entry was unlinked during the transaction.
3467 * In order for that scan to work, we must include one key smaller than
3468 * the smallest logged by this transaction and one key larger than the largest
3469 * key logged by this transaction.
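/*
 * The loop below works through the key space in ranges (a simplified
 * sketch of its control flow):
 *
 *	min_key = 0;
 *	while log_dir_items() reports a max_key != (u64)-1:
 *		min_key = max_key + 1;
 *	then repeat the whole pass with BTRFS_DIR_INDEX_KEY
 */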
3471 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3472 struct btrfs_root *root, struct inode *inode,
3473 struct btrfs_path *path,
3474 struct btrfs_path *dst_path,
3475 struct btrfs_log_ctx *ctx)
3477 u64 min_key;
3478 u64 max_key;
3479 int ret;
3480 int key_type = BTRFS_DIR_ITEM_KEY;
3482 again:
3483 min_key = 0;
3484 max_key = 0;
3485 while (1) {
3486 ret = log_dir_items(trans, root, inode, path,
3487 dst_path, key_type, ctx, min_key,
3488 &max_key);
3489 if (ret)
3490 return ret;
3491 if (max_key == (u64)-1)
3492 break;
3493 min_key = max_key + 1;
3496 if (key_type == BTRFS_DIR_ITEM_KEY) {
3497 key_type = BTRFS_DIR_INDEX_KEY;
3498 goto again;
3500 return 0;
3504 * a helper function to drop items from the log before we relog an
3505 * inode. max_key_type indicates the highest item type to remove.
3506 * This cannot be run for file data extents because it does not
3507 * free the extents they point to.
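/*
 * Example (the type is shown only to illustrate the key ordering): calling
 * drop_objectid_items(trans, log, path, ino, BTRFS_XATTR_ITEM_KEY) removes
 * every logged key for 'ino' whose type sorts at or below
 * BTRFS_XATTR_ITEM_KEY (inode item, refs, xattrs), while file extent
 * items, which sort higher, are left alone.
 */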
3509 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3510 struct btrfs_root *log,
3511 struct btrfs_path *path,
3512 u64 objectid, int max_key_type)
3514 int ret;
3515 struct btrfs_key key;
3516 struct btrfs_key found_key;
3517 int start_slot;
3519 key.objectid = objectid;
3520 key.type = max_key_type;
3521 key.offset = (u64)-1;
3523 while (1) {
3524 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3525 BUG_ON(ret == 0); /* Logic error */
3526 if (ret < 0)
3527 break;
3529 if (path->slots[0] == 0)
3530 break;
3532 path->slots[0]--;
3533 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3534 path->slots[0]);
3536 if (found_key.objectid != objectid)
3537 break;
3539 found_key.offset = 0;
3540 found_key.type = 0;
3541 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3542 &start_slot);
3544 ret = btrfs_del_items(trans, log, path, start_slot,
3545 path->slots[0] - start_slot + 1);
3547 * If start slot isn't 0 then we don't need to re-search, we've
3548 * found the last guy with the objectid in this tree.
3550 if (ret || start_slot != 0)
3551 break;
3552 btrfs_release_path(path);
3554 btrfs_release_path(path);
3555 if (ret > 0)
3556 ret = 0;
3557 return ret;
3560 static void fill_inode_item(struct btrfs_trans_handle *trans,
3561 struct extent_buffer *leaf,
3562 struct btrfs_inode_item *item,
3563 struct inode *inode, int log_inode_only,
3564 u64 logged_isize)
3566 struct btrfs_map_token token;
3568 btrfs_init_map_token(&token);
3570 if (log_inode_only) {
3571 /* set the generation to zero so the recovery code
3572 * can tell the difference between a logging
3573 * just to say 'this inode exists' and a logging
3574 * to say 'update this inode with these values'
3576 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3577 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3578 } else {
3579 btrfs_set_token_inode_generation(leaf, item,
3580 BTRFS_I(inode)->generation,
3581 &token);
3582 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3585 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3586 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3587 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3588 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3590 btrfs_set_token_timespec_sec(leaf, &item->atime,
3591 inode->i_atime.tv_sec, &token);
3592 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3593 inode->i_atime.tv_nsec, &token);
3595 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3596 inode->i_mtime.tv_sec, &token);
3597 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3598 inode->i_mtime.tv_nsec, &token);
3600 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3601 inode->i_ctime.tv_sec, &token);
3602 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3603 inode->i_ctime.tv_nsec, &token);
3605 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3606 &token);
3608 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3609 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3610 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3611 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3612 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3615 static int log_inode_item(struct btrfs_trans_handle *trans,
3616 struct btrfs_root *log, struct btrfs_path *path,
3617 struct inode *inode)
3619 struct btrfs_inode_item *inode_item;
3620 int ret;
3622 ret = btrfs_insert_empty_item(trans, log, path,
3623 &BTRFS_I(inode)->location,
3624 sizeof(*inode_item));
3625 if (ret && ret != -EEXIST)
3626 return ret;
3627 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3628 struct btrfs_inode_item);
3629 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3630 btrfs_release_path(path);
3631 return 0;
3634 static noinline int copy_items(struct btrfs_trans_handle *trans,
3635 struct inode *inode,
3636 struct btrfs_path *dst_path,
3637 struct btrfs_path *src_path, u64 *last_extent,
3638 int start_slot, int nr, int inode_only,
3639 u64 logged_isize)
3641 unsigned long src_offset;
3642 unsigned long dst_offset;
3643 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3644 struct btrfs_file_extent_item *extent;
3645 struct btrfs_inode_item *inode_item;
3646 struct extent_buffer *src = src_path->nodes[0];
3647 struct btrfs_key first_key, last_key, key;
3648 int ret;
3649 struct btrfs_key *ins_keys;
3650 u32 *ins_sizes;
3651 char *ins_data;
3652 int i;
3653 struct list_head ordered_sums;
3654 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3655 bool has_extents = false;
3656 bool need_find_last_extent = true;
3657 bool done = false;
3659 INIT_LIST_HEAD(&ordered_sums);
3661 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3662 nr * sizeof(u32), GFP_NOFS);
3663 if (!ins_data)
3664 return -ENOMEM;
3666 first_key.objectid = (u64)-1;
3668 ins_sizes = (u32 *)ins_data;
3669 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3671 for (i = 0; i < nr; i++) {
3672 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3673 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3675 ret = btrfs_insert_empty_items(trans, log, dst_path,
3676 ins_keys, ins_sizes, nr);
3677 if (ret) {
3678 kfree(ins_data);
3679 return ret;
3682 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3683 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3684 dst_path->slots[0]);
3686 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3688 if (i == nr - 1)
3689 last_key = ins_keys[i];
3691 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3692 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3693 dst_path->slots[0],
3694 struct btrfs_inode_item);
3695 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3696 inode, inode_only == LOG_INODE_EXISTS,
3697 logged_isize);
3698 } else {
3699 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3700 src_offset, ins_sizes[i]);
3704 * We set need_find_last_extent here in case we know we were
3705 * processing other items and then walk into the first extent in
3706 * the inode. If we don't hit an extent then nothing changes,
3707 * we'll do the last search the next time around.
3709 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3710 has_extents = true;
3711 if (first_key.objectid == (u64)-1)
3712 first_key = ins_keys[i];
3713 } else {
3714 need_find_last_extent = false;
3717 /* take a reference on file data extents so that truncates
3718 * or deletes of this inode don't have to relog the inode
3719 * again
3721 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3722 !skip_csum) {
3723 int found_type;
3724 extent = btrfs_item_ptr(src, start_slot + i,
3725 struct btrfs_file_extent_item);
3727 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3728 continue;
3730 found_type = btrfs_file_extent_type(src, extent);
3731 if (found_type == BTRFS_FILE_EXTENT_REG) {
3732 u64 ds, dl, cs, cl;
3733 ds = btrfs_file_extent_disk_bytenr(src,
3734 extent);
3735 /* ds == 0 is a hole */
3736 if (ds == 0)
3737 continue;
3739 dl = btrfs_file_extent_disk_num_bytes(src,
3740 extent);
3741 cs = btrfs_file_extent_offset(src, extent);
3742 cl = btrfs_file_extent_num_bytes(src,
3743 extent);
3744 if (btrfs_file_extent_compression(src,
3745 extent)) {
3746 cs = 0;
3747 cl = dl;
3750 ret = btrfs_lookup_csums_range(
3751 log->fs_info->csum_root,
3752 ds + cs, ds + cs + cl - 1,
3753 &ordered_sums, 0);
3754 if (ret) {
3755 btrfs_release_path(dst_path);
3756 kfree(ins_data);
3757 return ret;
3763 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3764 btrfs_release_path(dst_path);
3765 kfree(ins_data);
3768 * we have to do this after the loop above to avoid changing the
3769 * log tree while trying to change the log tree.
3771 ret = 0;
3772 while (!list_empty(&ordered_sums)) {
3773 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3774 struct btrfs_ordered_sum,
3775 list);
3776 if (!ret)
3777 ret = btrfs_csum_file_blocks(trans, log, sums);
3778 list_del(&sums->list);
3779 kfree(sums);
3782 if (!has_extents)
3783 return ret;
3785 if (need_find_last_extent && *last_extent == first_key.offset) {
3787 * We don't have any leaves between our current one and the one
3788 * we processed before that can have file extent items for our
3789 * inode (and have a generation number smaller than our current
3790 * transaction id).
3792 need_find_last_extent = false;
3796 * Because we use btrfs_search_forward we could skip leaves that were
3797 * not modified and then assume *last_extent is valid when it really
3798 * isn't. So back up to the previous leaf and read the end of the last
3799 * extent before we go and fill in holes.
3801 if (need_find_last_extent) {
3802 u64 len;
3804 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3805 if (ret < 0)
3806 return ret;
3807 if (ret)
3808 goto fill_holes;
3809 if (src_path->slots[0])
3810 src_path->slots[0]--;
3811 src = src_path->nodes[0];
3812 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3813 if (key.objectid != btrfs_ino(inode) ||
3814 key.type != BTRFS_EXTENT_DATA_KEY)
3815 goto fill_holes;
3816 extent = btrfs_item_ptr(src, src_path->slots[0],
3817 struct btrfs_file_extent_item);
3818 if (btrfs_file_extent_type(src, extent) ==
3819 BTRFS_FILE_EXTENT_INLINE) {
3820 len = btrfs_file_extent_inline_len(src,
3821 src_path->slots[0],
3822 extent);
3823 *last_extent = ALIGN(key.offset + len,
3824 log->sectorsize);
3825 } else {
3826 len = btrfs_file_extent_num_bytes(src, extent);
3827 *last_extent = key.offset + len;
3830 fill_holes:
3831 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3832 * things could have happened
3834 * 1) A merge could have happened, so we could currently be on a leaf
3835 * that holds what we were copying in the first place.
3836 * 2) A split could have happened, and now not all of the items we want
3837 * are on the same leaf.
3839 * So we need to adjust how we search for holes, we need to drop the
3840 * path and re-search for the first extent key we found, and then walk
3841 * forward until we hit the last one we copied.
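/*
 * Hole filling itself is simple (a sketch of what the loop further below
 * does): if the previous extent ended at *last_extent and the next extent
 * item starts at key.offset > *last_extent, a file extent item with a zero
 * disk_bytenr (i.e. a hole) covering [*last_extent, key.offset) is
 * inserted into the log before moving on.
 */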
3843 if (need_find_last_extent) {
3844 /* btrfs_prev_leaf could return 1 without releasing the path */
3845 btrfs_release_path(src_path);
3846 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3847 src_path, 0, 0);
3848 if (ret < 0)
3849 return ret;
3850 ASSERT(ret == 0);
3851 src = src_path->nodes[0];
3852 i = src_path->slots[0];
3853 } else {
3854 i = start_slot;
3858 * Ok so here we need to go through and fill in any holes we may have
3859 * to make sure that holes are punched for those areas in case they had
3860 * extents previously.
3862 while (!done) {
3863 u64 offset, len;
3864 u64 extent_end;
3866 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3867 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3868 if (ret < 0)
3869 return ret;
3870 ASSERT(ret == 0);
3871 src = src_path->nodes[0];
3872 i = 0;
3873 need_find_last_extent = true;
3876 btrfs_item_key_to_cpu(src, &key, i);
3877 if (!btrfs_comp_cpu_keys(&key, &last_key))
3878 done = true;
3879 if (key.objectid != btrfs_ino(inode) ||
3880 key.type != BTRFS_EXTENT_DATA_KEY) {
3881 i++;
3882 continue;
3884 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3885 if (btrfs_file_extent_type(src, extent) ==
3886 BTRFS_FILE_EXTENT_INLINE) {
3887 len = btrfs_file_extent_inline_len(src, i, extent);
3888 extent_end = ALIGN(key.offset + len, log->sectorsize);
3889 } else {
3890 len = btrfs_file_extent_num_bytes(src, extent);
3891 extent_end = key.offset + len;
3893 i++;
3895 if (*last_extent == key.offset) {
3896 *last_extent = extent_end;
3897 continue;
3899 offset = *last_extent;
3900 len = key.offset - *last_extent;
3901 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3902 offset, 0, 0, len, 0, len, 0,
3903 0, 0);
3904 if (ret)
3905 break;
3906 *last_extent = extent_end;
3909 * Need to let the callers know we dropped the path so they should
3910 * re-search.
3912 if (!ret && need_find_last_extent)
3913 ret = 1;
3914 return ret;
3917 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3919 struct extent_map *em1, *em2;
3921 em1 = list_entry(a, struct extent_map, list);
3922 em2 = list_entry(b, struct extent_map, list);
3924 if (em1->start < em2->start)
3925 return -1;
3926 else if (em1->start > em2->start)
3927 return 1;
3928 return 0;
3931 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3932 struct inode *inode,
3933 struct btrfs_root *root,
3934 const struct extent_map *em,
3935 const struct list_head *logged_list,
3936 bool *ordered_io_error)
3938 struct btrfs_ordered_extent *ordered;
3939 struct btrfs_root *log = root->log_root;
3940 u64 mod_start = em->mod_start;
3941 u64 mod_len = em->mod_len;
3942 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3943 u64 csum_offset;
3944 u64 csum_len;
3945 LIST_HEAD(ordered_sums);
3946 int ret = 0;
3948 *ordered_io_error = false;
3950 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3951 em->block_start == EXTENT_MAP_HOLE)
3952 return 0;
3955 * Wait for any ordered extent that covers our extent map. If it
3956 * finishes without an error, first check and see if our csums are on
3957 * our outstanding ordered extents.
3959 list_for_each_entry(ordered, logged_list, log_list) {
3960 struct btrfs_ordered_sum *sum;
3962 if (!mod_len)
3963 break;
3965 if (ordered->file_offset + ordered->len <= mod_start ||
3966 mod_start + mod_len <= ordered->file_offset)
3967 continue;
3969 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3970 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3971 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3972 const u64 start = ordered->file_offset;
3973 const u64 end = ordered->file_offset + ordered->len - 1;
3975 WARN_ON(ordered->inode != inode);
3976 filemap_fdatawrite_range(inode->i_mapping, start, end);
3979 wait_event(ordered->wait,
3980 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3981 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3983 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3985 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3986 * i_mapping flags, so that the next fsync won't get
3987 * an outdated io error too.
3989 filemap_check_errors(inode->i_mapping);
3990 *ordered_io_error = true;
3991 break;
3994 * We are going to copy all the csums on this ordered extent, so
3995 * go ahead and adjust mod_start and mod_len in case this
3996 * ordered extent has already been logged.
3998 if (ordered->file_offset > mod_start) {
3999 if (ordered->file_offset + ordered->len >=
4000 mod_start + mod_len)
4001 mod_len = ordered->file_offset - mod_start;
4003 * If we have this case
4005 * |--------- logged extent ---------|
4006 * |----- ordered extent ----|
4008 * Just don't mess with mod_start and mod_len, we'll
4009 * just end up logging more csums than we need and it
4010 * will be ok.
4012 } else {
4013 if (ordered->file_offset + ordered->len <
4014 mod_start + mod_len) {
4015 mod_len = (mod_start + mod_len) -
4016 (ordered->file_offset + ordered->len);
4017 mod_start = ordered->file_offset +
4018 ordered->len;
4019 } else {
4020 mod_len = 0;
4024 if (skip_csum)
4025 continue;
4028 * To keep us from looping for the above case of an ordered
4029 * extent that falls inside of the logged extent.
4031 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
4032 &ordered->flags))
4033 continue;
4035 list_for_each_entry(sum, &ordered->list, list) {
4036 ret = btrfs_csum_file_blocks(trans, log, sum);
4037 if (ret)
4038 break;
4042 if (*ordered_io_error || !mod_len || ret || skip_csum)
4043 return ret;
4045 if (em->compress_type) {
4046 csum_offset = 0;
4047 csum_len = max(em->block_len, em->orig_block_len);
4048 } else {
4049 csum_offset = mod_start - em->start;
4050 csum_len = mod_len;
4053 /* block start is already adjusted for the file extent offset. */
4054 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
4055 em->block_start + csum_offset,
4056 em->block_start + csum_offset +
4057 csum_len - 1, &ordered_sums, 0);
4058 if (ret)
4059 return ret;
4061 while (!list_empty(&ordered_sums)) {
4062 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4063 struct btrfs_ordered_sum,
4064 list);
4065 if (!ret)
4066 ret = btrfs_csum_file_blocks(trans, log, sums);
4067 list_del(&sums->list);
4068 kfree(sums);
4071 return ret;
4074 static int log_one_extent(struct btrfs_trans_handle *trans,
4075 struct inode *inode, struct btrfs_root *root,
4076 const struct extent_map *em,
4077 struct btrfs_path *path,
4078 const struct list_head *logged_list,
4079 struct btrfs_log_ctx *ctx)
4081 struct btrfs_root *log = root->log_root;
4082 struct btrfs_file_extent_item *fi;
4083 struct extent_buffer *leaf;
4084 struct btrfs_map_token token;
4085 struct btrfs_key key;
4086 u64 extent_offset = em->start - em->orig_start;
4087 u64 block_len;
4088 int ret;
4089 int extent_inserted = 0;
4090 bool ordered_io_err = false;
4092 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
4093 &ordered_io_err);
4094 if (ret)
4095 return ret;
4097 if (ordered_io_err) {
4098 ctx->io_err = -EIO;
4099 return 0;
4102 btrfs_init_map_token(&token);
4104 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
4105 em->start + em->len, NULL, 0, 1,
4106 sizeof(*fi), &extent_inserted);
4107 if (ret)
4108 return ret;
4110 if (!extent_inserted) {
4111 key.objectid = btrfs_ino(inode);
4112 key.type = BTRFS_EXTENT_DATA_KEY;
4113 key.offset = em->start;
4115 ret = btrfs_insert_empty_item(trans, log, path, &key,
4116 sizeof(*fi));
4117 if (ret)
4118 return ret;
4120 leaf = path->nodes[0];
4121 fi = btrfs_item_ptr(leaf, path->slots[0],
4122 struct btrfs_file_extent_item);
4124 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4125 &token);
4126 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4127 btrfs_set_token_file_extent_type(leaf, fi,
4128 BTRFS_FILE_EXTENT_PREALLOC,
4129 &token);
4130 else
4131 btrfs_set_token_file_extent_type(leaf, fi,
4132 BTRFS_FILE_EXTENT_REG,
4133 &token);
4135 block_len = max(em->block_len, em->orig_block_len);
4136 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4137 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4138 em->block_start,
4139 &token);
4140 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4141 &token);
4142 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4143 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4144 em->block_start -
4145 extent_offset, &token);
4146 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4147 &token);
4148 } else {
4149 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4150 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4151 &token);
4154 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4155 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4156 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4157 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4158 &token);
4159 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4160 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4161 btrfs_mark_buffer_dirty(leaf);
4163 btrfs_release_path(path);
4165 return ret;
4168 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4169 struct btrfs_root *root,
4170 struct inode *inode,
4171 struct btrfs_path *path,
4172 struct list_head *logged_list,
4173 struct btrfs_log_ctx *ctx,
4174 const u64 start,
4175 const u64 end)
4177 struct extent_map *em, *n;
4178 struct list_head extents;
4179 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
4180 u64 test_gen;
4181 int ret = 0;
4182 int num = 0;
4184 INIT_LIST_HEAD(&extents);
4186 down_write(&BTRFS_I(inode)->dio_sem);
4187 write_lock(&tree->lock);
4188 test_gen = root->fs_info->last_trans_committed;
4190 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4191 list_del_init(&em->list);
4194 * Just an arbitrary number; this can get really CPU intensive
4195 * once we start getting a lot of extents, and once we have a
4196 * bunch of extents we just want to commit since it will
4197 * be faster.
4199 if (++num > 32768) {
4200 list_del_init(&tree->modified_extents);
4201 ret = -EFBIG;
4202 goto process;
4205 if (em->generation <= test_gen)
4206 continue;
4207 /* Need a ref to keep it from getting evicted from cache */
4208 atomic_inc(&em->refs);
4209 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4210 list_add_tail(&em->list, &extents);
4211 num++;
4214 list_sort(NULL, &extents, extent_cmp);
4215 btrfs_get_logged_extents(inode, logged_list, start, end);
4217 * Some ordered extents started by fsync might have completed
4218 * before we could collect them into the list logged_list, which
4219 * means they're gone, not in our logged_list nor in the inode's
4220 * ordered tree. We want the application/user space to know an
4221 * error happened while attempting to persist file data so that
4222 * it can take proper action. If such an error happened, we leave
4223 * without writing to the log tree and the fsync must report the
4224 * file data write error and not commit the current transaction.
4226 ret = filemap_check_errors(inode->i_mapping);
4227 if (ret)
4228 ctx->io_err = ret;
4229 process:
4230 while (!list_empty(&extents)) {
4231 em = list_entry(extents.next, struct extent_map, list);
4233 list_del_init(&em->list);
4236 * If we had an error we just need to delete everybody from our
4237 * private list.
4239 if (ret) {
4240 clear_em_logging(tree, em);
4241 free_extent_map(em);
4242 continue;
4245 write_unlock(&tree->lock);
4247 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4248 ctx);
4249 write_lock(&tree->lock);
4250 clear_em_logging(tree, em);
4251 free_extent_map(em);
4253 WARN_ON(!list_empty(&extents));
4254 write_unlock(&tree->lock);
4255 up_write(&BTRFS_I(inode)->dio_sem);
4257 btrfs_release_path(path);
4258 return ret;
4261 static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4262 struct btrfs_path *path, u64 *size_ret)
4264 struct btrfs_key key;
4265 int ret;
4267 key.objectid = btrfs_ino(inode);
4268 key.type = BTRFS_INODE_ITEM_KEY;
4269 key.offset = 0;
4271 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4272 if (ret < 0) {
4273 return ret;
4274 } else if (ret > 0) {
4275 *size_ret = 0;
4276 } else {
4277 struct btrfs_inode_item *item;
4279 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4280 struct btrfs_inode_item);
4281 *size_ret = btrfs_inode_size(path->nodes[0], item);
4284 btrfs_release_path(path);
4285 return 0;
4289 * At the moment we always log all xattrs. This is to figure out at log replay
4290 * time which xattrs must have their deletion replayed. If a xattr is missing
4291 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4292 * because if a xattr is deleted, the inode is fsynced and a power failure
4293 * happens, causing the log to be replayed the next time the fs is mounted,
4294 * we want the xattr to not exist anymore (same behaviour as other filesystems
4295 * with a journal, ext3/4, xfs, f2fs, etc).
4297 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4298 struct btrfs_root *root,
4299 struct inode *inode,
4300 struct btrfs_path *path,
4301 struct btrfs_path *dst_path)
4303 int ret;
4304 struct btrfs_key key;
4305 const u64 ino = btrfs_ino(inode);
4306 int ins_nr = 0;
4307 int start_slot = 0;
4309 key.objectid = ino;
4310 key.type = BTRFS_XATTR_ITEM_KEY;
4311 key.offset = 0;
4313 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4314 if (ret < 0)
4315 return ret;
4317 while (true) {
4318 int slot = path->slots[0];
4319 struct extent_buffer *leaf = path->nodes[0];
4320 int nritems = btrfs_header_nritems(leaf);
4322 if (slot >= nritems) {
4323 if (ins_nr > 0) {
4324 u64 last_extent = 0;
4326 ret = copy_items(trans, inode, dst_path, path,
4327 &last_extent, start_slot,
4328 ins_nr, 1, 0);
4329 /* can't be 1, extent items aren't processed */
4330 ASSERT(ret <= 0);
4331 if (ret < 0)
4332 return ret;
4333 ins_nr = 0;
4335 ret = btrfs_next_leaf(root, path);
4336 if (ret < 0)
4337 return ret;
4338 else if (ret > 0)
4339 break;
4340 continue;
4343 btrfs_item_key_to_cpu(leaf, &key, slot);
4344 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4345 break;
4347 if (ins_nr == 0)
4348 start_slot = slot;
4349 ins_nr++;
4350 path->slots[0]++;
4351 cond_resched();
4353 if (ins_nr > 0) {
4354 u64 last_extent = 0;
4356 ret = copy_items(trans, inode, dst_path, path,
4357 &last_extent, start_slot,
4358 ins_nr, 1, 0);
4359 /* can't be 1, extent items aren't processed */
4360 ASSERT(ret <= 0);
4361 if (ret < 0)
4362 return ret;
4365 return 0;
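The ins_nr/start_slot pattern above (used again in btrfs_log_inode() further down) batches runs of contiguous leaf slots so copy_items() is called once per run of matching items rather than once per item. A minimal stand-alone sketch of that batching idiom, with made-up helpers and a simple match[] array standing in for the key checks:

#include <stdio.h>

static void copy_batch(int start_slot, int count)
{
	printf("copy slots [%d, %d)\n", start_slot, start_slot + count);
}

int main(void)
{
	/* 1 marks a slot whose key matches the current inode/item filter. */
	const int match[] = { 1, 1, 1, 0, 1, 1, 0, 0, 1 };
	const int nritems = sizeof(match) / sizeof(match[0]);
	int ins_nr = 0, start_slot = 0;

	for (int slot = 0; slot < nritems; slot++) {
		if (match[slot]) {
			if (ins_nr == 0)
				start_slot = slot;
			ins_nr++;
			continue;
		}
		if (ins_nr > 0) { /* flush the pending batch */
			copy_batch(start_slot, ins_nr);
			ins_nr = 0;
		}
	}
	if (ins_nr > 0) /* trailing batch, like the final copy_items() call */
		copy_batch(start_slot, ins_nr);
	return 0;
}

This prints three batches, [0, 3), [4, 6) and [8, 9), one copy call per contiguous run.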
4369 * If the no holes feature is enabled we need to make sure any hole between the
4370 * last extent and the i_size of our inode is explicitly marked in the log. This
4371 * is to make sure that doing something like:
4373 * 1) create file with 128Kb of data
4374 * 2) truncate file to 64Kb
4375 * 3) truncate file to 256Kb
4376 * 4) fsync file
4377 * 5) <crash/power failure>
4378 * 6) mount fs and trigger log replay
4380 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4381 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4382 * file correspond to a hole. The presence of explicit holes in a log tree is
4383 * what guarantees that log replay will remove/adjust file extent items in the
4384 * fs/subvol tree.
4386 * Here we do not need to care about holes between extents; that is already done
4387 * by copy_items(). We also only need to do this in the full sync path, where we
4388 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4389 * lookup the list of modified extent maps and if any represents a hole, we
4390 * insert a corresponding extent representing a hole in the log tree.
4392 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4393 struct btrfs_root *root,
4394 struct inode *inode,
4395 struct btrfs_path *path)
4397 int ret;
4398 struct btrfs_key key;
4399 u64 hole_start;
4400 u64 hole_size;
4401 struct extent_buffer *leaf;
4402 struct btrfs_root *log = root->log_root;
4403 const u64 ino = btrfs_ino(inode);
4404 const u64 i_size = i_size_read(inode);
4406 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4407 return 0;
4409 key.objectid = ino;
4410 key.type = BTRFS_EXTENT_DATA_KEY;
4411 key.offset = (u64)-1;
4413 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4414 ASSERT(ret != 0);
4415 if (ret < 0)
4416 return ret;
4418 ASSERT(path->slots[0] > 0);
4419 path->slots[0]--;
4420 leaf = path->nodes[0];
4421 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4423 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4424 /* inode does not have any extents */
4425 hole_start = 0;
4426 hole_size = i_size;
4427 } else {
4428 struct btrfs_file_extent_item *extent;
4429 u64 len;
4432 * If there's an extent beyond i_size, an explicit hole was
4433 * already inserted by copy_items().
4435 if (key.offset >= i_size)
4436 return 0;
4438 extent = btrfs_item_ptr(leaf, path->slots[0],
4439 struct btrfs_file_extent_item);
4441 if (btrfs_file_extent_type(leaf, extent) ==
4442 BTRFS_FILE_EXTENT_INLINE) {
4443 len = btrfs_file_extent_inline_len(leaf,
4444 path->slots[0],
4445 extent);
4446 ASSERT(len == i_size);
4447 return 0;
4450 len = btrfs_file_extent_num_bytes(leaf, extent);
4451 /* Last extent goes beyond i_size, no need to log a hole. */
4452 if (key.offset + len > i_size)
4453 return 0;
4454 hole_start = key.offset + len;
4455 hole_size = i_size - hole_start;
4457 btrfs_release_path(path);
4459 /* Last extent ends at i_size. */
4460 if (hole_size == 0)
4461 return 0;
4463 hole_size = ALIGN(hole_size, root->sectorsize);
4464 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4465 hole_size, 0, hole_size, 0, 0, 0);
4466 return ret;
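Condensed to plain arithmetic, the hole computation above says: if the last file extent item ends before i_size (and does not cross it), the logged hole covers [last_extent_end, i_size), rounded up to the sector size. A tiny user-space sketch, with hypothetical helper names, that reproduces the numbers from the 128Kb/64Kb/256Kb truncate scenario described before the function:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Given where the last file extent item ends (0 when the inode has no
 * extents) and the inode's i_size, return the length of the trailing
 * hole that would be logged, or 0 when no hole item is needed (the
 * inline-extent and extent-beyond-i_size cases are folded into the
 * first guard here). */
static uint64_t trailing_hole(uint64_t last_extent_end, uint64_t i_size,
			      uint64_t sectorsize, uint64_t *hole_start)
{
	if (last_extent_end >= i_size)
		return 0;

	*hole_start = last_extent_end;
	return ALIGN_UP(i_size - last_extent_end, sectorsize);
}

int main(void)
{
	uint64_t start;
	/* 128Kb file truncated to 64Kb then to 256Kb: the last extent ends
	 * at 64Kb and i_size is 256Kb, so the logged hole is 192Kb long. */
	uint64_t len = trailing_hole(64 * 1024, 256 * 1024, 4096, &start);

	printf("hole at %llu, len %llu\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}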
4470 * When we are logging a new inode X, check if it doesn't have a reference that
4471 * matches the reference from some other inode Y created in a past transaction
4472 * and that was renamed in the current transaction. If we don't do this, then at
4473 * log replay time we can lose inode Y (and all its files if it's a directory):
4475 * mkdir /mnt/x
4476 * echo "hello world" > /mnt/x/foobar
4477 * sync
4478 * mv /mnt/x /mnt/y
4479 * mkdir /mnt/x # or touch /mnt/x
4480 * xfs_io -c fsync /mnt/x
4481 * <power fail>
4482 * mount fs, trigger log replay
4484 * After the log replay procedure, we would lose the first directory and all its
4485 * files (file foobar).
4486 * For the case where inode Y is not a directory we simply end up losing it:
4488 * echo "123" > /mnt/foo
4489 * sync
4490 * mv /mnt/foo /mnt/bar
4491 * echo "abc" > /mnt/foo
4492 * xfs_io -c fsync /mnt/foo
4493 * <power fail>
4495 * We also need this for cases where a snapshot entry is replaced by some other
4496 * entry (file or directory), otherwise we end up with an unreplayable log due to
4497 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4498 * if it were a regular entry:
4500 * mkdir /mnt/x
4501 * btrfs subvolume snapshot /mnt /mnt/x/snap
4502 * btrfs subvolume delete /mnt/x/snap
4503 * rmdir /mnt/x
4504 * mkdir /mnt/x
4505 * fsync /mnt/x or fsync some new file inside it
4506 * <power fail>
4508 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4509 * the same transaction.
4511 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4512 const int slot,
4513 const struct btrfs_key *key,
4514 struct inode *inode,
4515 u64 *other_ino)
4517 int ret;
4518 struct btrfs_path *search_path;
4519 char *name = NULL;
4520 u32 name_len = 0;
4521 u32 item_size = btrfs_item_size_nr(eb, slot);
4522 u32 cur_offset = 0;
4523 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4525 search_path = btrfs_alloc_path();
4526 if (!search_path)
4527 return -ENOMEM;
4528 search_path->search_commit_root = 1;
4529 search_path->skip_locking = 1;
4531 while (cur_offset < item_size) {
4532 u64 parent;
4533 u32 this_name_len;
4534 u32 this_len;
4535 unsigned long name_ptr;
4536 struct btrfs_dir_item *di;
4538 if (key->type == BTRFS_INODE_REF_KEY) {
4539 struct btrfs_inode_ref *iref;
4541 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4542 parent = key->offset;
4543 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4544 name_ptr = (unsigned long)(iref + 1);
4545 this_len = sizeof(*iref) + this_name_len;
4546 } else {
4547 struct btrfs_inode_extref *extref;
4549 extref = (struct btrfs_inode_extref *)(ptr +
4550 cur_offset);
4551 parent = btrfs_inode_extref_parent(eb, extref);
4552 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4553 name_ptr = (unsigned long)&extref->name;
4554 this_len = sizeof(*extref) + this_name_len;
4557 if (this_name_len > name_len) {
4558 char *new_name;
4560 new_name = krealloc(name, this_name_len, GFP_NOFS);
4561 if (!new_name) {
4562 ret = -ENOMEM;
4563 goto out;
4565 name_len = this_name_len;
4566 name = new_name;
4569 read_extent_buffer(eb, name, name_ptr, this_name_len);
4570 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4571 search_path, parent,
4572 name, this_name_len, 0);
4573 if (di && !IS_ERR(di)) {
4574 struct btrfs_key di_key;
4576 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4577 di, &di_key);
4578 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4579 ret = 1;
4580 *other_ino = di_key.objectid;
4581 } else {
4582 ret = -EAGAIN;
4584 goto out;
4585 } else if (IS_ERR(di)) {
4586 ret = PTR_ERR(di);
4587 goto out;
4589 btrfs_release_path(search_path);
4591 cur_offset += this_len;
4593 ret = 0;
4594 out:
4595 btrfs_free_path(search_path);
4596 kfree(name);
4597 return ret;
4600 /* log a single inode in the tree log.
4601 * At least one parent directory for this inode must exist in the tree
4602 * or be logged already.
4604 * Any items from this inode changed by the current transaction are copied
4605 * to the log tree. An extra reference is taken on any extents in this
4606 * file, allowing us to avoid a whole pile of corner cases around logging
4607 * blocks that have been removed from the tree.
4609 * See LOG_INODE_ALL and related defines for a description of what inode_only
4610 * does.
4612 * This handles both files and directories.
4614 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4615 struct btrfs_root *root, struct inode *inode,
4616 int inode_only,
4617 const loff_t start,
4618 const loff_t end,
4619 struct btrfs_log_ctx *ctx)
4621 struct btrfs_path *path;
4622 struct btrfs_path *dst_path;
4623 struct btrfs_key min_key;
4624 struct btrfs_key max_key;
4625 struct btrfs_root *log = root->log_root;
4626 struct extent_buffer *src = NULL;
4627 LIST_HEAD(logged_list);
4628 u64 last_extent = 0;
4629 int err = 0;
4630 int ret;
4631 int nritems;
4632 int ins_start_slot = 0;
4633 int ins_nr;
4634 bool fast_search = false;
4635 u64 ino = btrfs_ino(inode);
4636 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4637 u64 logged_isize = 0;
4638 bool need_log_inode_item = true;
4639 bool xattrs_logged = false;
4641 path = btrfs_alloc_path();
4642 if (!path)
4643 return -ENOMEM;
4644 dst_path = btrfs_alloc_path();
4645 if (!dst_path) {
4646 btrfs_free_path(path);
4647 return -ENOMEM;
4650 min_key.objectid = ino;
4651 min_key.type = BTRFS_INODE_ITEM_KEY;
4652 min_key.offset = 0;
4654 max_key.objectid = ino;
4657 /* today the code can only do partial logging of directories */
4658 if (S_ISDIR(inode->i_mode) ||
4659 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4660 &BTRFS_I(inode)->runtime_flags) &&
4661 inode_only >= LOG_INODE_EXISTS))
4662 max_key.type = BTRFS_XATTR_ITEM_KEY;
4663 else
4664 max_key.type = (u8)-1;
4665 max_key.offset = (u64)-1;
4668 * Only run delayed items if we are a dir or a new file.
4669 * Otherwise commit the delayed inode only, which is needed in
4670 * order for the log replay code to mark inodes for link count
4671 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4673 if (S_ISDIR(inode->i_mode) ||
4674 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
4675 ret = btrfs_commit_inode_delayed_items(trans, inode);
4676 else
4677 ret = btrfs_commit_inode_delayed_inode(inode);
4679 if (ret) {
4680 btrfs_free_path(path);
4681 btrfs_free_path(dst_path);
4682 return ret;
4685 if (inode_only == LOG_OTHER_INODE) {
4686 inode_only = LOG_INODE_EXISTS;
4687 mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
4688 SINGLE_DEPTH_NESTING);
4689 } else {
4690 mutex_lock(&BTRFS_I(inode)->log_mutex);
4694 * a brute force approach to making sure we get the most up-to-date
4695 * copies of everything.
4697 if (S_ISDIR(inode->i_mode)) {
4698 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4700 if (inode_only == LOG_INODE_EXISTS)
4701 max_key_type = BTRFS_XATTR_ITEM_KEY;
4702 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4703 } else {
4704 if (inode_only == LOG_INODE_EXISTS) {
4706 * Make sure the new inode item we write to the log has
4707 * the same isize as the current one (if it exists).
4708 * This is necessary to prevent data loss after log
4709 * replay, and also to prevent doing a wrong expanding
4710 * truncate - e.g. create file, write 4K into offset
4711 * 0, fsync, write 4K into offset 4096, add hard link,
4712 * fsync some other file (to sync log), power fail - if
4713 * we use the inode's current i_size, after log replay
4714 * we get an 8Kb file, with the last 4Kb extent as a hole
4715 * (zeroes), as if an expanding truncate happened,
4716 * instead of getting a file of 4Kb only.
4718 err = logged_inode_size(log, inode, path,
4719 &logged_isize);
4720 if (err)
4721 goto out_unlock;
4723 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4724 &BTRFS_I(inode)->runtime_flags)) {
4725 if (inode_only == LOG_INODE_EXISTS) {
4726 max_key.type = BTRFS_XATTR_ITEM_KEY;
4727 ret = drop_objectid_items(trans, log, path, ino,
4728 max_key.type);
4729 } else {
4730 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4731 &BTRFS_I(inode)->runtime_flags);
4732 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4733 &BTRFS_I(inode)->runtime_flags);
4734 while (1) {
4735 ret = btrfs_truncate_inode_items(trans,
4736 log, inode, 0, 0);
4737 if (ret != -EAGAIN)
4738 break;
4741 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4742 &BTRFS_I(inode)->runtime_flags) ||
4743 inode_only == LOG_INODE_EXISTS) {
4744 if (inode_only == LOG_INODE_ALL)
4745 fast_search = true;
4746 max_key.type = BTRFS_XATTR_ITEM_KEY;
4747 ret = drop_objectid_items(trans, log, path, ino,
4748 max_key.type);
4749 } else {
4750 if (inode_only == LOG_INODE_ALL)
4751 fast_search = true;
4752 goto log_extents;
4756 if (ret) {
4757 err = ret;
4758 goto out_unlock;
4761 while (1) {
4762 ins_nr = 0;
4763 ret = btrfs_search_forward(root, &min_key,
4764 path, trans->transid);
4765 if (ret < 0) {
4766 err = ret;
4767 goto out_unlock;
4769 if (ret != 0)
4770 break;
4771 again:
4772 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4773 if (min_key.objectid != ino)
4774 break;
4775 if (min_key.type > max_key.type)
4776 break;
4778 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4779 need_log_inode_item = false;
4781 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4782 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4783 BTRFS_I(inode)->generation == trans->transid) {
4784 u64 other_ino = 0;
4786 ret = btrfs_check_ref_name_override(path->nodes[0],
4787 path->slots[0],
4788 &min_key, inode,
4789 &other_ino);
4790 if (ret < 0) {
4791 err = ret;
4792 goto out_unlock;
4793 } else if (ret > 0 && ctx &&
4794 other_ino != btrfs_ino(ctx->inode)) {
4795 struct btrfs_key inode_key;
4796 struct inode *other_inode;
4798 if (ins_nr > 0) {
4799 ins_nr++;
4800 } else {
4801 ins_nr = 1;
4802 ins_start_slot = path->slots[0];
4804 ret = copy_items(trans, inode, dst_path, path,
4805 &last_extent, ins_start_slot,
4806 ins_nr, inode_only,
4807 logged_isize);
4808 if (ret < 0) {
4809 err = ret;
4810 goto out_unlock;
4812 ins_nr = 0;
4813 btrfs_release_path(path);
4814 inode_key.objectid = other_ino;
4815 inode_key.type = BTRFS_INODE_ITEM_KEY;
4816 inode_key.offset = 0;
4817 other_inode = btrfs_iget(root->fs_info->sb,
4818 &inode_key, root,
4819 NULL);
4821 * If the other inode that had a conflicting dir
4822 * entry was deleted in the current transaction,
4823 * we don't need to do more work nor fall back to
4824 * a transaction commit.
4826 if (IS_ERR(other_inode) &&
4827 PTR_ERR(other_inode) == -ENOENT) {
4828 goto next_key;
4829 } else if (IS_ERR(other_inode)) {
4830 err = PTR_ERR(other_inode);
4831 goto out_unlock;
4834 * We are safe logging the other inode without
4835 * acquiring its i_mutex as long as we log with
4836 * the LOG_INODE_EXISTS mode. We're safe against
4837 * concurrent renames of the other inode as well
4838 * because during a rename we pin the log and
4839 * update the log with the new name before we
4840 * unpin it.
4842 err = btrfs_log_inode(trans, root, other_inode,
4843 LOG_OTHER_INODE,
4844 0, LLONG_MAX, ctx);
4845 iput(other_inode);
4846 if (err)
4847 goto out_unlock;
4848 else
4849 goto next_key;
4853 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4854 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4855 if (ins_nr == 0)
4856 goto next_slot;
4857 ret = copy_items(trans, inode, dst_path, path,
4858 &last_extent, ins_start_slot,
4859 ins_nr, inode_only, logged_isize);
4860 if (ret < 0) {
4861 err = ret;
4862 goto out_unlock;
4864 ins_nr = 0;
4865 if (ret) {
4866 btrfs_release_path(path);
4867 continue;
4869 goto next_slot;
4872 src = path->nodes[0];
4873 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4874 ins_nr++;
4875 goto next_slot;
4876 } else if (!ins_nr) {
4877 ins_start_slot = path->slots[0];
4878 ins_nr = 1;
4879 goto next_slot;
4882 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4883 ins_start_slot, ins_nr, inode_only,
4884 logged_isize);
4885 if (ret < 0) {
4886 err = ret;
4887 goto out_unlock;
4889 if (ret) {
4890 ins_nr = 0;
4891 btrfs_release_path(path);
4892 continue;
4894 ins_nr = 1;
4895 ins_start_slot = path->slots[0];
4896 next_slot:
4898 nritems = btrfs_header_nritems(path->nodes[0]);
4899 path->slots[0]++;
4900 if (path->slots[0] < nritems) {
4901 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4902 path->slots[0]);
4903 goto again;
4905 if (ins_nr) {
4906 ret = copy_items(trans, inode, dst_path, path,
4907 &last_extent, ins_start_slot,
4908 ins_nr, inode_only, logged_isize);
4909 if (ret < 0) {
4910 err = ret;
4911 goto out_unlock;
4913 ret = 0;
4914 ins_nr = 0;
4916 btrfs_release_path(path);
4917 next_key:
4918 if (min_key.offset < (u64)-1) {
4919 min_key.offset++;
4920 } else if (min_key.type < max_key.type) {
4921 min_key.type++;
4922 min_key.offset = 0;
4923 } else {
4924 break;
4927 if (ins_nr) {
4928 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4929 ins_start_slot, ins_nr, inode_only,
4930 logged_isize);
4931 if (ret < 0) {
4932 err = ret;
4933 goto out_unlock;
4935 ret = 0;
4936 ins_nr = 0;
4939 btrfs_release_path(path);
4940 btrfs_release_path(dst_path);
4941 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4942 if (err)
4943 goto out_unlock;
4944 xattrs_logged = true;
4945 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4946 btrfs_release_path(path);
4947 btrfs_release_path(dst_path);
4948 err = btrfs_log_trailing_hole(trans, root, inode, path);
4949 if (err)
4950 goto out_unlock;
4952 log_extents:
4953 btrfs_release_path(path);
4954 btrfs_release_path(dst_path);
4955 if (need_log_inode_item) {
4956 err = log_inode_item(trans, log, dst_path, inode);
4957 if (!err && !xattrs_logged) {
4958 err = btrfs_log_all_xattrs(trans, root, inode, path,
4959 dst_path);
4960 btrfs_release_path(path);
4962 if (err)
4963 goto out_unlock;
4965 if (fast_search) {
4966 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4967 &logged_list, ctx, start, end);
4968 if (ret) {
4969 err = ret;
4970 goto out_unlock;
4972 } else if (inode_only == LOG_INODE_ALL) {
4973 struct extent_map *em, *n;
4975 write_lock(&em_tree->lock);
4977 * We can't just remove every em if we're called for a ranged
4978 * fsync - that is, one that doesn't cover the whole possible
4979 * file range (0 to LLONG_MAX). This is because we can have
4980 * em's that fall outside the range we're logging and therefore
4981 * their ordered operations haven't completed yet
4982 * (btrfs_finish_ordered_io() not invoked yet). This means we
4983 * didn't get their respective file extent item in the fs/subvol
4984 * tree yet, and need to let the next fast fsync (one which
4985 * consults the list of modified extent maps) find the em so
4986 * that it logs a matching file extent item and waits for the
4987 * respective ordered operation to complete (if it's still
4988 * running).
4990 * Removing every em outside the range we're logging would make
4991 * the next fast fsync not log their matching file extent items,
4992 * therefore making us lose data after a log replay.
4994 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4995 list) {
4996 const u64 mod_end = em->mod_start + em->mod_len - 1;
4998 if (em->mod_start >= start && mod_end <= end)
4999 list_del_init(&em->list);
5001 write_unlock(&em_tree->lock);
5004 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
5005 ret = log_directory_changes(trans, root, inode, path, dst_path,
5006 ctx);
5007 if (ret) {
5008 err = ret;
5009 goto out_unlock;
5013 spin_lock(&BTRFS_I(inode)->lock);
5014 BTRFS_I(inode)->logged_trans = trans->transid;
5015 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
5016 spin_unlock(&BTRFS_I(inode)->lock);
5017 out_unlock:
5018 if (unlikely(err))
5019 btrfs_put_logged_extents(&logged_list);
5020 else
5021 btrfs_submit_logged_extents(&logged_list, log);
5022 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5024 btrfs_free_path(path);
5025 btrfs_free_path(dst_path);
5026 return err;
5030 * Check if we must fallback to a transaction commit when logging an inode.
5031 * This must be called after logging the inode and is used only in the context
5032 * when fsyncing an inode requires logging some other inode - in which
5033 * case we can't lock the i_mutex of each other inode we need to log as that
5034 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5035 * log inodes up or down in the hierarchy) or rename operations for example. So
5036 * we take the log_mutex of the inode after we have logged it and then check for
5037 * its last_unlink_trans value - this is safe because any task setting
5038 * last_unlink_trans must take the log_mutex and it must do this before it does
5039 * the actual unlink operation, so if we do this check before a concurrent task
5040 * sets last_unlink_trans it means we've logged a consistent version/state of
5041 * all the inode items, otherwise we are not sure and must do a transaction
5042 * commit (the concurrent task might have only updated last_unlink_trans before
5043 * we logged the inode or it might have also done the unlink).
5045 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5046 struct inode *inode)
5048 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
5049 bool ret = false;
5051 mutex_lock(&BTRFS_I(inode)->log_mutex);
5052 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
5054 * Make sure any commits to the log are forced to be full
5055 * commits.
5057 btrfs_set_log_full_commit(fs_info, trans);
5058 ret = true;
5060 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5062 return ret;
5066 * follow the dentry parent pointers up the chain and see if any
5067 * of the directories in it require a full commit before they can
5068 * be logged. Returns zero if nothing special needs to be done or 1 if
5069 * a full commit is required.
5071 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5072 struct inode *inode,
5073 struct dentry *parent,
5074 struct super_block *sb,
5075 u64 last_committed)
5077 int ret = 0;
5078 struct dentry *old_parent = NULL;
5079 struct inode *orig_inode = inode;
5082 * for regular files, if its inode is already on disk, we don't
5083 * have to worry about the parents at all. This is because
5084 * we can use the last_unlink_trans field to record renames
5085 * and other fun in this file.
5087 if (S_ISREG(inode->i_mode) &&
5088 BTRFS_I(inode)->generation <= last_committed &&
5089 BTRFS_I(inode)->last_unlink_trans <= last_committed)
5090 goto out;
5092 if (!S_ISDIR(inode->i_mode)) {
5093 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5094 goto out;
5095 inode = d_inode(parent);
5098 while (1) {
5100 * If we are logging a directory then we start with our inode,
5101 * not our parent's inode, so we need to skip setting the
5102 * logged_trans so that further down in the log code we don't
5103 * think this inode has already been logged.
5105 if (inode != orig_inode)
5106 BTRFS_I(inode)->logged_trans = trans->transid;
5107 smp_mb();
5109 if (btrfs_must_commit_transaction(trans, inode)) {
5110 ret = 1;
5111 break;
5114 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5115 break;
5117 if (IS_ROOT(parent)) {
5118 inode = d_inode(parent);
5119 if (btrfs_must_commit_transaction(trans, inode))
5120 ret = 1;
5121 break;
5124 parent = dget_parent(parent);
5125 dput(old_parent);
5126 old_parent = parent;
5127 inode = d_inode(parent);
5130 dput(old_parent);
5131 out:
5132 return ret;
5135 struct btrfs_dir_list {
5136 u64 ino;
5137 struct list_head list;
5141 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5142 * details about why it is needed.
5143 * This is a recursive operation - if an existing dentry corresponds to a
5144 * directory, that directory's new entries are logged too (same behaviour as
5145 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5146 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5147 * complains about the following circular lock dependency / possible deadlock:
5149 * CPU0 CPU1
5150 * ---- ----
5151 * lock(&type->i_mutex_dir_key#3/2);
5152 * lock(sb_internal#2);
5153 * lock(&type->i_mutex_dir_key#3/2);
5154 * lock(&sb->s_type->i_mutex_key#14);
5156 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5157 * sb_start_intwrite() in btrfs_start_transaction().
5158 * Not locking i_mutex of the inodes is still safe because:
5160 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5161 * that while logging the inode new references (names) are added or removed
5162 * from the inode, leaving the logged inode item with a link count that does
5163 * not match the number of logged inode reference items. This is fine because
5164 * at log replay time we compute the real number of links and correct the
5165 * link count in the inode item (see replay_one_buffer() and
5166 * link_to_fixup_dir());
5168 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5169 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5170 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5171 * has a size that doesn't match the sum of the lengths of all the logged
5172 * names. This does not result in a problem because if a dir_item key is
5173 * logged but its matching dir_index key is not logged, at log replay time we
5174 * don't use it to replay the respective name (see replay_one_name()). On the
5175 * other hand if only the dir_index key ends up being logged, the respective
5176 * name is added to the fs/subvol tree with both the dir_item and dir_index
5177 * keys created (see replay_one_name()).
5178 * The directory's inode item with a wrong i_size is not a problem either,
5179 * since we don't use it at log replay time to set the i_size in the inode
5180 * item of the fs/subvol tree (see overwrite_item()).
5182 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5183 struct btrfs_root *root,
5184 struct inode *start_inode,
5185 struct btrfs_log_ctx *ctx)
5187 struct btrfs_root *log = root->log_root;
5188 struct btrfs_path *path;
5189 LIST_HEAD(dir_list);
5190 struct btrfs_dir_list *dir_elem;
5191 int ret = 0;
5193 path = btrfs_alloc_path();
5194 if (!path)
5195 return -ENOMEM;
5197 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5198 if (!dir_elem) {
5199 btrfs_free_path(path);
5200 return -ENOMEM;
5202 dir_elem->ino = btrfs_ino(start_inode);
5203 list_add_tail(&dir_elem->list, &dir_list);
5205 while (!list_empty(&dir_list)) {
5206 struct extent_buffer *leaf;
5207 struct btrfs_key min_key;
5208 int nritems;
5209 int i;
5211 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5212 list);
5213 if (ret)
5214 goto next_dir_inode;
5216 min_key.objectid = dir_elem->ino;
5217 min_key.type = BTRFS_DIR_ITEM_KEY;
5218 min_key.offset = 0;
5219 again:
5220 btrfs_release_path(path);
5221 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5222 if (ret < 0) {
5223 goto next_dir_inode;
5224 } else if (ret > 0) {
5225 ret = 0;
5226 goto next_dir_inode;
5229 process_leaf:
5230 leaf = path->nodes[0];
5231 nritems = btrfs_header_nritems(leaf);
5232 for (i = path->slots[0]; i < nritems; i++) {
5233 struct btrfs_dir_item *di;
5234 struct btrfs_key di_key;
5235 struct inode *di_inode;
5236 struct btrfs_dir_list *new_dir_elem;
5237 int log_mode = LOG_INODE_EXISTS;
5238 int type;
5240 btrfs_item_key_to_cpu(leaf, &min_key, i);
5241 if (min_key.objectid != dir_elem->ino ||
5242 min_key.type != BTRFS_DIR_ITEM_KEY)
5243 goto next_dir_inode;
5245 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5246 type = btrfs_dir_type(leaf, di);
5247 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5248 type != BTRFS_FT_DIR)
5249 continue;
5250 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5251 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5252 continue;
5254 btrfs_release_path(path);
5255 di_inode = btrfs_iget(root->fs_info->sb, &di_key,
5256 root, NULL);
5257 if (IS_ERR(di_inode)) {
5258 ret = PTR_ERR(di_inode);
5259 goto next_dir_inode;
5262 if (btrfs_inode_in_log(di_inode, trans->transid)) {
5263 iput(di_inode);
5264 break;
5267 ctx->log_new_dentries = false;
5268 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5269 log_mode = LOG_INODE_ALL;
5270 ret = btrfs_log_inode(trans, root, di_inode,
5271 log_mode, 0, LLONG_MAX, ctx);
5272 if (!ret &&
5273 btrfs_must_commit_transaction(trans, di_inode))
5274 ret = 1;
5275 iput(di_inode);
5276 if (ret)
5277 goto next_dir_inode;
5278 if (ctx->log_new_dentries) {
5279 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5280 GFP_NOFS);
5281 if (!new_dir_elem) {
5282 ret = -ENOMEM;
5283 goto next_dir_inode;
5285 new_dir_elem->ino = di_key.objectid;
5286 list_add_tail(&new_dir_elem->list, &dir_list);
5288 break;
5290 if (i == nritems) {
5291 ret = btrfs_next_leaf(log, path);
5292 if (ret < 0) {
5293 goto next_dir_inode;
5294 } else if (ret > 0) {
5295 ret = 0;
5296 goto next_dir_inode;
5298 goto process_leaf;
5300 if (min_key.offset < (u64)-1) {
5301 min_key.offset++;
5302 goto again;
5304 next_dir_inode:
5305 list_del(&dir_elem->list);
5306 kfree(dir_elem);
5309 btrfs_free_path(path);
5310 return ret;
5313 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5314 struct inode *inode,
5315 struct btrfs_log_ctx *ctx)
5317 int ret;
5318 struct btrfs_path *path;
5319 struct btrfs_key key;
5320 struct btrfs_root *root = BTRFS_I(inode)->root;
5321 const u64 ino = btrfs_ino(inode);
5323 path = btrfs_alloc_path();
5324 if (!path)
5325 return -ENOMEM;
5326 path->skip_locking = 1;
5327 path->search_commit_root = 1;
5329 key.objectid = ino;
5330 key.type = BTRFS_INODE_REF_KEY;
5331 key.offset = 0;
5332 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5333 if (ret < 0)
5334 goto out;
5336 while (true) {
5337 struct extent_buffer *leaf = path->nodes[0];
5338 int slot = path->slots[0];
5339 u32 cur_offset = 0;
5340 u32 item_size;
5341 unsigned long ptr;
5343 if (slot >= btrfs_header_nritems(leaf)) {
5344 ret = btrfs_next_leaf(root, path);
5345 if (ret < 0)
5346 goto out;
5347 else if (ret > 0)
5348 break;
5349 continue;
5352 btrfs_item_key_to_cpu(leaf, &key, slot);
5353 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5354 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5355 break;
5357 item_size = btrfs_item_size_nr(leaf, slot);
5358 ptr = btrfs_item_ptr_offset(leaf, slot);
5359 while (cur_offset < item_size) {
5360 struct btrfs_key inode_key;
5361 struct inode *dir_inode;
5363 inode_key.type = BTRFS_INODE_ITEM_KEY;
5364 inode_key.offset = 0;
5366 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5367 struct btrfs_inode_extref *extref;
5369 extref = (struct btrfs_inode_extref *)
5370 (ptr + cur_offset);
5371 inode_key.objectid = btrfs_inode_extref_parent(
5372 leaf, extref);
5373 cur_offset += sizeof(*extref);
5374 cur_offset += btrfs_inode_extref_name_len(leaf,
5375 extref);
5376 } else {
5377 inode_key.objectid = key.offset;
5378 cur_offset = item_size;
5381 dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
5382 root, NULL);
5384 * If the parent inode was deleted, return an error to
5385 * fall back to a transaction commit. This is to prevent
5386 * getting an inode that was moved from one parent A to
5387 * a parent B, got its former parent A deleted and then
5388 * it got fsync'ed, from existing at both parents after
5389 * a log replay (and the old parent still existing).
5390 * Example:
5392 * mkdir /mnt/A
5393 * mkdir /mnt/B
5394 * touch /mnt/B/bar
5395 * sync
5396 * mv /mnt/B/bar /mnt/A/bar
5397 * mv -T /mnt/A /mnt/B
5398 * fsync /mnt/B/bar
5399 * <power fail>
5401 * If we ignore the old parent B which got deleted,
5402 * after a log replay we would have file bar linked
5403 * at both parents and the old parent B would still
5404 * exist.
5406 if (IS_ERR(dir_inode)) {
5407 ret = PTR_ERR(dir_inode);
5408 goto out;
5411 if (ctx)
5412 ctx->log_new_dentries = false;
5413 ret = btrfs_log_inode(trans, root, dir_inode,
5414 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5415 if (!ret &&
5416 btrfs_must_commit_transaction(trans, dir_inode))
5417 ret = 1;
5418 if (!ret && ctx && ctx->log_new_dentries)
5419 ret = log_new_dir_dentries(trans, root,
5420 dir_inode, ctx);
5421 iput(dir_inode);
5422 if (ret)
5423 goto out;
5425 path->slots[0]++;
5427 ret = 0;
5428 out:
5429 btrfs_free_path(path);
5430 return ret;
5434 * helper function around btrfs_log_inode to make sure newly created
5435 * parent directories also end up in the log. Only a minimal amount of
5436 * inode and backref logging is done for any parent directories that are
5437 * older than the last committed transaction.
5439 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5440 struct btrfs_root *root, struct inode *inode,
5441 struct dentry *parent,
5442 const loff_t start,
5443 const loff_t end,
5444 int exists_only,
5445 struct btrfs_log_ctx *ctx)
5447 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5448 struct super_block *sb;
5449 struct dentry *old_parent = NULL;
5450 int ret = 0;
5451 u64 last_committed = root->fs_info->last_trans_committed;
5452 bool log_dentries = false;
5453 struct inode *orig_inode = inode;
5455 sb = inode->i_sb;
5457 if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
5458 ret = 1;
5459 goto end_no_trans;
5463 * If the previous transaction commit didn't complete, we need to
5464 * do a full commit ourselves.
5466 if (root->fs_info->last_trans_log_full_commit >
5467 root->fs_info->last_trans_committed) {
5468 ret = 1;
5469 goto end_no_trans;
5472 if (root != BTRFS_I(inode)->root ||
5473 btrfs_root_refs(&root->root_item) == 0) {
5474 ret = 1;
5475 goto end_no_trans;
5478 ret = check_parent_dirs_for_sync(trans, inode, parent,
5479 sb, last_committed);
5480 if (ret)
5481 goto end_no_trans;
5483 if (btrfs_inode_in_log(inode, trans->transid)) {
5484 ret = BTRFS_NO_LOG_SYNC;
5485 goto end_no_trans;
5488 ret = start_log_trans(trans, root, ctx);
5489 if (ret)
5490 goto end_no_trans;
5492 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5493 if (ret)
5494 goto end_trans;
5497 * for regular files, if its inode is already on disk, we don't
5498 * have to worry about the parents at all. This is because
5499 * we can use the last_unlink_trans field to record renames
5500 * and other fun in this file.
5502 if (S_ISREG(inode->i_mode) &&
5503 BTRFS_I(inode)->generation <= last_committed &&
5504 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
5505 ret = 0;
5506 goto end_trans;
5509 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
5510 log_dentries = true;
5513 * On unlink we must make sure all our current and old parent directory
5514 * inodes are fully logged. This is to prevent leaving dangling
5515 * directory index entries in directories that were our parents but are
5516 * not anymore. Not doing this results in old parent directory being
5517 * impossible to delete after log replay (rmdir will always fail with
5518 * error -ENOTEMPTY).
5520 * Example 1:
5522 * mkdir testdir
5523 * touch testdir/foo
5524 * ln testdir/foo testdir/bar
5525 * sync
5526 * unlink testdir/bar
5527 * xfs_io -c fsync testdir/foo
5528 * <power failure>
5529 * mount fs, triggers log replay
5531 * If we don't log the parent directory (testdir), after log replay the
5532 * directory still has an entry pointing to the file inode using the bar
5533 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5534 * the file inode has a link count of 1.
5536 * Example 2:
5538 * mkdir testdir
5539 * touch foo
5540 * ln foo testdir/foo2
5541 * ln foo testdir/foo3
5542 * sync
5543 * unlink testdir/foo3
5544 * xfs_io -c fsync foo
5545 * <power failure>
5546 * mount fs, triggers log replay
5548 * Similar as the first example, after log replay the parent directory
5549 * testdir still has an entry pointing to the inode file with name foo3
5550 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5551 * and has a link count of 2.
5553 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
5554 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5555 if (ret)
5556 goto end_trans;
5559 while (1) {
5560 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5561 break;
5563 inode = d_inode(parent);
5564 if (root != BTRFS_I(inode)->root)
5565 break;
5567 if (BTRFS_I(inode)->generation > last_committed) {
5568 ret = btrfs_log_inode(trans, root, inode,
5569 LOG_INODE_EXISTS,
5570 0, LLONG_MAX, ctx);
5571 if (ret)
5572 goto end_trans;
5574 if (IS_ROOT(parent))
5575 break;
5577 parent = dget_parent(parent);
5578 dput(old_parent);
5579 old_parent = parent;
5581 if (log_dentries)
5582 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5583 else
5584 ret = 0;
5585 end_trans:
5586 dput(old_parent);
5587 if (ret < 0) {
5588 btrfs_set_log_full_commit(root->fs_info, trans);
5589 ret = 1;
5592 if (ret)
5593 btrfs_remove_log_ctx(root, ctx);
5594 btrfs_end_log_trans(root);
5595 end_no_trans:
5596 return ret;
5600 * it is not safe to log the dentry if the chunk root has added new
5601 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5602 * If this returns 1, you must commit the transaction to safely get your
5603 * data on disk.
5605 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5606 struct btrfs_root *root, struct dentry *dentry,
5607 const loff_t start,
5608 const loff_t end,
5609 struct btrfs_log_ctx *ctx)
5611 struct dentry *parent = dget_parent(dentry);
5612 int ret;
5614 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
5615 start, end, 0, ctx);
5616 dput(parent);
5618 return ret;
5622 * should be called during mount to recover and replay any log trees
5623 * from the FS
5625 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5627 int ret;
5628 struct btrfs_path *path;
5629 struct btrfs_trans_handle *trans;
5630 struct btrfs_key key;
5631 struct btrfs_key found_key;
5632 struct btrfs_key tmp_key;
5633 struct btrfs_root *log;
5634 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5635 struct walk_control wc = {
5636 .process_func = process_one_buffer,
5637 .stage = 0,
5640 path = btrfs_alloc_path();
5641 if (!path)
5642 return -ENOMEM;
5644 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5646 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5647 if (IS_ERR(trans)) {
5648 ret = PTR_ERR(trans);
5649 goto error;
5652 wc.trans = trans;
5653 wc.pin = 1;
5655 ret = walk_log_tree(trans, log_root_tree, &wc);
5656 if (ret) {
5657 btrfs_handle_fs_error(fs_info, ret,
5658 "Failed to pin buffers while recovering log root tree.");
5659 goto error;
5662 again:
5663 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5664 key.offset = (u64)-1;
5665 key.type = BTRFS_ROOT_ITEM_KEY;
5667 while (1) {
5668 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5670 if (ret < 0) {
5671 btrfs_handle_fs_error(fs_info, ret,
5672 "Couldn't find tree log root.");
5673 goto error;
5675 if (ret > 0) {
5676 if (path->slots[0] == 0)
5677 break;
5678 path->slots[0]--;
5680 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5681 path->slots[0]);
5682 btrfs_release_path(path);
5683 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5684 break;
5686 log = btrfs_read_fs_root(log_root_tree, &found_key);
5687 if (IS_ERR(log)) {
5688 ret = PTR_ERR(log);
5689 btrfs_handle_fs_error(fs_info, ret,
5690 "Couldn't read tree log root.");
5691 goto error;
5694 tmp_key.objectid = found_key.offset;
5695 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5696 tmp_key.offset = (u64)-1;
5698 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5699 if (IS_ERR(wc.replay_dest)) {
5700 ret = PTR_ERR(wc.replay_dest);
5701 free_extent_buffer(log->node);
5702 free_extent_buffer(log->commit_root);
5703 kfree(log);
5704 btrfs_handle_fs_error(fs_info, ret,
5705 "Couldn't read target root for tree log recovery.");
5706 goto error;
5709 wc.replay_dest->log_root = log;
5710 btrfs_record_root_in_trans(trans, wc.replay_dest);
5711 ret = walk_log_tree(trans, log, &wc);
5713 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5714 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5715 path);
5718 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5719 struct btrfs_root *root = wc.replay_dest;
5721 btrfs_release_path(path);
5724 * We have just replayed everything, and the highest
5725 * objectid of fs roots probably has changed in case
5726 * some inode_items got replayed.
5728 * root->objectid_mutex is not acquired as log replay
5729 * could only happen during mount.
5731 ret = btrfs_find_highest_objectid(root,
5732 &root->highest_objectid);
5735 key.offset = found_key.offset - 1;
5736 wc.replay_dest->log_root = NULL;
5737 free_extent_buffer(log->node);
5738 free_extent_buffer(log->commit_root);
5739 kfree(log);
5741 if (ret)
5742 goto error;
5744 if (found_key.offset == 0)
5745 break;
5747 btrfs_release_path(path);
5749 /* step one is to pin it all, step two is to replay just inodes */
5750 if (wc.pin) {
5751 wc.pin = 0;
5752 wc.process_func = replay_one_buffer;
5753 wc.stage = LOG_WALK_REPLAY_INODES;
5754 goto again;
5756 /* step three is to replay everything */
5757 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5758 wc.stage++;
5759 goto again;
5762 btrfs_free_path(path);
5764 /* step 4: commit the transaction, which also unpins the blocks */
5765 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
5766 if (ret)
5767 return ret;
5769 free_extent_buffer(log_root_tree->node);
5770 log_root_tree->log_root = NULL;
5771 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5772 kfree(log_root_tree);
5774 return 0;
5775 error:
5776 if (wc.trans)
5777 btrfs_end_transaction(wc.trans, fs_info->tree_root);
5778 btrfs_free_path(path);
5779 return ret;
5783 * there are some corner cases where we want to force a full
5784 * commit instead of allowing a directory to be logged.
5786 * They revolve around files that were unlinked from the directory, and
5787 * this function updates the parent directory so that a full commit is
5788 * properly done if it is fsync'd later after the unlinks are done.
5790 * Must be called before the unlink operations (updates to the subvolume tree,
5791 * inodes, etc) are done.
5793 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5794 struct inode *dir, struct inode *inode,
5795 int for_rename)
5798 * when we're logging a file, if it hasn't been renamed
5799 * or unlinked, and its inode is fully committed on disk,
5800 * we don't have to worry about walking up the directory chain
5801 * to log its parents.
5803 * So, we use the last_unlink_trans field to put this transid
5804 * into the file. When the file is logged we check it and
5805 * don't log the parents if the file is fully on disk.
5807 mutex_lock(&BTRFS_I(inode)->log_mutex);
5808 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5809 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5812 * if this directory was already logged, any new
5813 * names for this file/dir will get recorded
5815 smp_mb();
5816 if (BTRFS_I(dir)->logged_trans == trans->transid)
5817 return;
5820 * if the inode we're about to unlink was logged,
5821 * the log will be properly updated for any new names
5823 if (BTRFS_I(inode)->logged_trans == trans->transid)
5824 return;
5827 * when renaming files across directories, if the directory
5828 * we're unlinking from gets fsync'd later on, there's
5829 * no way to find the destination directory later and fsync it
5830 * properly. So, we have to be conservative and force commits
5831 * so the new name gets discovered.
5833 if (for_rename)
5834 goto record;
5836 /* we can safely do the unlink without any special recording */
5837 return;
5839 record:
5840 mutex_lock(&BTRFS_I(dir)->log_mutex);
5841 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5842 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5846 * Make sure that if someone attempts to fsync the parent directory of a deleted
5847 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5848 * that after replaying the log tree of the parent directory's root we will not
5849 * see the snapshot anymore and at log replay time we will not see any log tree
5850 * corresponding to the deleted snapshot's root, which could lead to replaying
5851 * it after replaying the log tree of the parent directory (which would replay
5852 * the snapshot delete operation).
5854 * Must be called before the actual snapshot destroy operation (updates to the
5855 * parent root and tree of tree roots trees, etc) are done.
5857 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5858 struct inode *dir)
5860 mutex_lock(&BTRFS_I(dir)->log_mutex);
5861 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5862 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5866 * Call this after adding a new name for a file and it will properly
5867 * update the log to reflect the new name.
5869 * It will return zero if all goes well, and it will return 1 if a
5870 * full transaction commit is required.
5872 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5873 struct inode *inode, struct inode *old_dir,
5874 struct dentry *parent)
5876 struct btrfs_root * root = BTRFS_I(inode)->root;
5879 * this will force the logging code to walk the dentry chain
5880 * up for the file
5882 if (S_ISREG(inode->i_mode))
5883 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5886 * if this inode hasn't been logged and the directory we're renaming it
5887 * from hasn't been logged, we don't need to log it
5889 if (BTRFS_I(inode)->logged_trans <=
5890 root->fs_info->last_trans_committed &&
5891 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
5892 root->fs_info->last_trans_committed))
5893 return 0;
5895 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5896 LLONG_MAX, 1, NULL);