// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and read locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
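
/*
 * Illustrative example (not from the original source): moving three key
 * pointers from slot 5 to slot 2 within the node at logical address L is
 * logged as one MOD_LOG_MOVE_KEYS element with slot == 5,
 * move.dst_slot == 2 and move.nr_items == 3.  Because that move overwrites
 * slots 2-4, it is preceded in the log by MOD_LOG_KEY_REMOVE_WHILE_MOVING
 * elements saving the old contents of those slots (see
 * tree_mod_log_insert_move() below).
 */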
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure that elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}
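
/*
 * Usage sketch (illustrative, not from the original source), assuming a
 * caller such as a backref walker that needs a stable view of past tree
 * states:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... search and rewind using time_seq == elem.seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * While the element sits on the blocker list, log entries newer than the
 * oldest live blocker are preserved for the caller.
 */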
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
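
/*
 * Illustrative note (not from the original source): the rb tree is keyed on
 * the (logical, seq) pair, so all log entries for a given block are grouped
 * together and ordered by sequence number within that group.  Inserting a
 * second element with an identical (logical, seq) pair fails with -EEXIST.
 */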
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}
static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
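
/*
 * Illustrative example (not from the original source): if block B has log
 * entries with sequence numbers 5, 9 and 14, then with min_seq == 8
 * tree_mod_log_search_oldest() returns the seq-9 entry (the oldest one not
 * below min_seq) while tree_mod_log_search() returns the seq-14 entry (the
 * most recent one).  The seq-5 entry is ignored by both.
 */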
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}
static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as dropping
	 * a tree) are only allowed on blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;
		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}
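
/*
 * Illustrative example (not from the original source): a logged
 * MOD_LOG_KEY_ADD at slot 7 is undone by simply decrementing the item
 * count, while a logged MOD_LOG_KEY_REMOVE is undone by writing the saved
 * key, block pointer and generation back into the slot and incrementing
 * the item count.
 */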
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	int level;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
		level = old_root->level;
	} else {
		logical = eb_root->start;
		level = btrfs_header_level(eb_root);
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0, level, NULL);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
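
/*
 * Illustrative example (not from the original source): with a 16KiB
 * nodesize, blocks at 0 and 40960 count as close (40960 - 16384 = 24576,
 * which is below 32768), while blocks at 0 and 65536 do not
 * (65536 - 16384 = 49152).
 */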
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
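
/*
 * Illustrative example (not from the original source): keys compare as
 * (objectid, type, offset) tuples, so for an inode with objectid 256,
 * (256, BTRFS_INODE_ITEM_KEY, 0) sorts before (256, BTRFS_EXTENT_DATA_KEY, 0)
 * because BTRFS_INODE_ITEM_KEY has the smaller type value, and both sort
 * before any key with objectid 257.
 */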
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		struct btrfs_key first_key;
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		btrfs_node_key_to_cpu(parent, &first_key, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen,
						      parent_level - 1,
						      &first_key);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen,
						parent_level - 1, &first_key);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
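
/*
 * Illustrative example (not from the original source): searching a buffer
 * whose keys are (1, t, 0), (5, t, 0), (9, t, 0) for the key (5, t, 0)
 * returns 0 with *slot == 1; searching for (7, t, 0) returns 1 with
 * *slot == 2, the slot where that key would have to be inserted.
 */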
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(fs_info, mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = tree_mod_log_insert_root(root->node, child, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, fs_info, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, fs_info, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(fs_info, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			ret = tree_mod_log_insert_key(parent, pslot + 1,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, fs_info, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, fs_info, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		ret = tree_mod_log_insert_key(parent, pslot,
				MOD_LOG_KEY_REPLACE, GFP_NOFS);
		BUG_ON(ret < 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
2032 /* Node balancing for insertion. Here we only split or push nodes around
2033 * when they are completely full. This is also done top down, so we
2034 * have to be pessimistic.
2036 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2037 struct btrfs_root *root,
2038 struct btrfs_path *path, int level)
2040 struct btrfs_fs_info *fs_info = root->fs_info;
2041 struct extent_buffer *right = NULL;
2042 struct extent_buffer *mid;
2043 struct extent_buffer *left = NULL;
2044 struct extent_buffer *parent = NULL;
2045 int ret = 0;
2046 int wret;
2047 int pslot;
2048 int orig_slot = path->slots[level];
2050 if (level == 0)
2051 return 1;
2053 mid = path->nodes[level];
2054 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2056 if (level < BTRFS_MAX_LEVEL - 1) {
2057 parent = path->nodes[level + 1];
2058 pslot = path->slots[level + 1];
2061 if (!parent)
2062 return 1;
2064 left = read_node_slot(fs_info, parent, pslot - 1);
2065 if (IS_ERR(left))
2066 left = NULL;
2068 /* first, try to make some room in the middle buffer */
2069 if (left) {
2070 u32 left_nr;
2072 btrfs_tree_lock(left);
2073 btrfs_set_lock_blocking(left);
2075 left_nr = btrfs_header_nritems(left);
2076 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2077 wret = 1;
2078 } else {
2079 ret = btrfs_cow_block(trans, root, left, parent,
2080 pslot - 1, &left);
2081 if (ret)
2082 wret = 1;
2083 else {
2084 wret = push_node_left(trans, fs_info,
2085 left, mid, 0);
2088 if (wret < 0)
2089 ret = wret;
2090 if (wret == 0) {
2091 struct btrfs_disk_key disk_key;
2092 orig_slot += left_nr;
2093 btrfs_node_key(mid, &disk_key, 0);
2094 ret = tree_mod_log_insert_key(parent, pslot,
2095 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2096 BUG_ON(ret < 0);
2097 btrfs_set_node_key(parent, &disk_key, pslot);
2098 btrfs_mark_buffer_dirty(parent);
2099 if (btrfs_header_nritems(left) > orig_slot) {
2100 path->nodes[level] = left;
2101 path->slots[level + 1] -= 1;
2102 path->slots[level] = orig_slot;
2103 btrfs_tree_unlock(mid);
2104 free_extent_buffer(mid);
2105 } else {
2106 orig_slot -=
2107 btrfs_header_nritems(left);
2108 path->slots[level] = orig_slot;
2109 btrfs_tree_unlock(left);
2110 free_extent_buffer(left);
2112 return 0;
2114 btrfs_tree_unlock(left);
2115 free_extent_buffer(left);
2117 right = read_node_slot(fs_info, parent, pslot + 1);
2118 if (IS_ERR(right))
2119 right = NULL;
2122 * then try to empty the rightmost buffer into the middle
2124 if (right) {
2125 u32 right_nr;
2127 btrfs_tree_lock(right);
2128 btrfs_set_lock_blocking(right);
2130 right_nr = btrfs_header_nritems(right);
2131 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2132 wret = 1;
2133 } else {
2134 ret = btrfs_cow_block(trans, root, right,
2135 parent, pslot + 1,
2136 &right);
2137 if (ret)
2138 wret = 1;
2139 else {
2140 wret = balance_node_right(trans, fs_info,
2141 right, mid);
2144 if (wret < 0)
2145 ret = wret;
2146 if (wret == 0) {
2147 struct btrfs_disk_key disk_key;
2149 btrfs_node_key(right, &disk_key, 0);
2150 ret = tree_mod_log_insert_key(parent, pslot + 1,
2151 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2152 BUG_ON(ret < 0);
2153 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2154 btrfs_mark_buffer_dirty(parent);
2156 if (btrfs_header_nritems(mid) <= orig_slot) {
2157 path->nodes[level] = right;
2158 path->slots[level + 1] += 1;
2159 path->slots[level] = orig_slot -
2160 btrfs_header_nritems(mid);
2161 btrfs_tree_unlock(mid);
2162 free_extent_buffer(mid);
2163 } else {
2164 btrfs_tree_unlock(right);
2165 free_extent_buffer(right);
2167 return 0;
2169 btrfs_tree_unlock(right);
2170 free_extent_buffer(right);
2172 return 1;
2176 * readahead one full node of leaves, finding things that are close
2177 * to the block in 'slot', and triggering readahead on them.
2179 static void reada_for_search(struct btrfs_fs_info *fs_info,
2180 struct btrfs_path *path,
2181 int level, int slot, u64 objectid)
2183 struct extent_buffer *node;
2184 struct btrfs_disk_key disk_key;
2185 u32 nritems;
2186 u64 search;
2187 u64 target;
2188 u64 nread = 0;
2189 struct extent_buffer *eb;
2190 u32 nr;
2191 u32 blocksize;
2192 u32 nscan = 0;
2194 if (level != 1)
2195 return;
2197 if (!path->nodes[level])
2198 return;
2200 node = path->nodes[level];
2202 search = btrfs_node_blockptr(node, slot);
2203 blocksize = fs_info->nodesize;
2204 eb = find_extent_buffer(fs_info, search);
2205 if (eb) {
2206 free_extent_buffer(eb);
2207 return;
2210 target = search;
2212 nritems = btrfs_header_nritems(node);
2213 nr = slot;
2215 while (1) {
2216 if (path->reada == READA_BACK) {
2217 if (nr == 0)
2218 break;
2219 nr--;
2220 } else if (path->reada == READA_FORWARD) {
2221 nr++;
2222 if (nr >= nritems)
2223 break;
2225 if (path->reada == READA_BACK && objectid) {
2226 btrfs_node_key(node, &disk_key, nr);
2227 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2228 break;
2230 search = btrfs_node_blockptr(node, nr);
2231 if ((search <= target && target - search <= 65536) ||
2232 (search > target && search - target <= 65536)) {
2233 readahead_tree_block(fs_info, search);
2234 nread += blocksize;
2236 nscan++;
2237 if (nread > 65536 || nscan > 32)
2238 break;
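/*
 * Illustrative sketch (invented name, not part of the original file): the
 * distance test used by the readahead loop above, pulled out as a pure
 * predicate. A block qualifies when its disk offset is within 64KiB of
 * the target, checked in a way that avoids unsigned underflow.
 */
static inline int example_within_ra_window(u64 search, u64 target)
{
        return (search <= target && target - search <= 65536) ||
               (search > target && search - target <= 65536);
}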
2242 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2243 struct btrfs_path *path, int level)
2245 int slot;
2246 int nritems;
2247 struct extent_buffer *parent;
2248 struct extent_buffer *eb;
2249 u64 gen;
2250 u64 block1 = 0;
2251 u64 block2 = 0;
2253 parent = path->nodes[level + 1];
2254 if (!parent)
2255 return;
2257 nritems = btrfs_header_nritems(parent);
2258 slot = path->slots[level + 1];
2260 if (slot > 0) {
2261 block1 = btrfs_node_blockptr(parent, slot - 1);
2262 gen = btrfs_node_ptr_generation(parent, slot - 1);
2263 eb = find_extent_buffer(fs_info, block1);
2265 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2266 * don't want to return -EAGAIN here. That will loop
2267 * forever
2269 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2270 block1 = 0;
2271 free_extent_buffer(eb);
2273 if (slot + 1 < nritems) {
2274 block2 = btrfs_node_blockptr(parent, slot + 1);
2275 gen = btrfs_node_ptr_generation(parent, slot + 1);
2276 eb = find_extent_buffer(fs_info, block2);
2277 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2278 block2 = 0;
2279 free_extent_buffer(eb);
2282 if (block1)
2283 readahead_tree_block(fs_info, block1);
2284 if (block2)
2285 readahead_tree_block(fs_info, block2);
2290 * when we walk down the tree, it is usually safe to unlock the higher layers
2291 * in the tree. The exceptions are when our path goes through slot 0, because
2292 * operations on the tree might require changing key pointers higher up in the
2293 * tree.
2295 * callers might also have set path->keep_locks, which tells this code to keep
2296 * the lock if the path points to the last slot in the block. This is part of
2297 * walking through the tree, and selecting the next slot in the higher block.
2299 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
2300 * if lowest_unlock is 1, level 0 won't be unlocked
2302 static noinline void unlock_up(struct btrfs_path *path, int level,
2303 int lowest_unlock, int min_write_lock_level,
2304 int *write_lock_level)
2306 int i;
2307 int skip_level = level;
2308 int no_skips = 0;
2309 struct extent_buffer *t;
2311 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2312 if (!path->nodes[i])
2313 break;
2314 if (!path->locks[i])
2315 break;
2316 if (!no_skips && path->slots[i] == 0) {
2317 skip_level = i + 1;
2318 continue;
2320 if (!no_skips && path->keep_locks) {
2321 u32 nritems;
2322 t = path->nodes[i];
2323 nritems = btrfs_header_nritems(t);
2324 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2325 skip_level = i + 1;
2326 continue;
2329 if (skip_level < i && i >= lowest_unlock)
2330 no_skips = 1;
2332 t = path->nodes[i];
2333 if (i >= lowest_unlock && i > skip_level) {
2334 btrfs_tree_unlock_rw(t, path->locks[i]);
2335 path->locks[i] = 0;
2336 if (write_lock_level &&
2337 i > min_write_lock_level &&
2338 i <= *write_lock_level) {
2339 *write_lock_level = i - 1;
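/*
 * Illustrative simplification (invented names; keep_locks and the
 * write_lock_level bookkeeping are deliberately ignored): the core rule of
 * unlock_up() above. Levels reached through slot 0 stay locked, because a
 * key change at the bottom may have to propagate into them; everything
 * else at or above lowest_unlock can be dropped.
 */
static void example_unlock_up(int *locks, const int *slots, int level,
                              int lowest_unlock, int max_level)
{
        int skip_level = level;
        int i;

        for (i = level; i < max_level; i++) {
                if (!locks[i])
                        break;
                if (slots[i] == 0) {
                        /* parent keys may need fixing up: keep the lock */
                        skip_level = i + 1;
                        continue;
                }
                if (i >= lowest_unlock && i > skip_level)
                        locks[i] = 0;   /* safe to unlock this level */
        }
}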
2346 * This releases any locks held in the path starting at level and
2347 * going all the way up to the root.
2349 * btrfs_search_slot will keep the lock held on higher nodes in a few
2350 * corner cases, such as COW of the block at slot zero in the node. This
2351 * ignores those rules, and it should only be called when there are no
2352 * more updates to be done higher up in the tree.
2354 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2356 int i;
2358 if (path->keep_locks)
2359 return;
2361 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2362 if (!path->nodes[i])
2363 continue;
2364 if (!path->locks[i])
2365 continue;
2366 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2367 path->locks[i] = 0;
2372 * helper function for btrfs_search_slot. The goal is to find a block
2373 * in cache without setting the path to blocking. If we find the block
2374 * we return zero and the path is unchanged.
2376 * If we can't find the block, we set the path blocking and do some
2377 * reada. -EAGAIN is returned and the search must be repeated.
2379 static int
2380 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2381 struct extent_buffer **eb_ret, int level, int slot,
2382 const struct btrfs_key *key)
2384 struct btrfs_fs_info *fs_info = root->fs_info;
2385 u64 blocknr;
2386 u64 gen;
2387 struct extent_buffer *b = *eb_ret;
2388 struct extent_buffer *tmp;
2389 struct btrfs_key first_key;
2390 int ret;
2391 int parent_level;
2393 blocknr = btrfs_node_blockptr(b, slot);
2394 gen = btrfs_node_ptr_generation(b, slot);
2395 parent_level = btrfs_header_level(b);
2396 btrfs_node_key_to_cpu(b, &first_key, slot);
2398 tmp = find_extent_buffer(fs_info, blocknr);
2399 if (tmp) {
2400 /* first we do an atomic uptodate check */
2401 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2402 *eb_ret = tmp;
2403 return 0;
2406 /* the pages were up to date, but we failed
2407 * the generation number check. Do a full
2408 * read for the generation number that is correct.
2409 * We must do this without dropping locks so
2410 * we can trust our generation number
2412 btrfs_set_path_blocking(p);
2414 /* now we're allowed to do a blocking uptodate check */
2415 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2416 if (!ret) {
2417 *eb_ret = tmp;
2418 return 0;
2420 free_extent_buffer(tmp);
2421 btrfs_release_path(p);
2422 return -EIO;
2426 * reduce lock contention at high levels
2427 * of the btree by dropping locks before
2428 * we read. Don't release the lock on the current
2429 * level because we need to walk this node to figure
2430 * out which blocks to read.
2432 btrfs_unlock_up_safe(p, level + 1);
2433 btrfs_set_path_blocking(p);
2435 if (p->reada != READA_NONE)
2436 reada_for_search(fs_info, p, level, slot, key->objectid);
2438 ret = -EAGAIN;
2439 tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2440 &first_key);
2441 if (!IS_ERR(tmp)) {
2443 * If the read above didn't mark this buffer up to date,
2444 * it will never end up being up to date. Set ret to -EIO now
2445 * and give up so that our caller doesn't loop forever
2446 * on our EAGAINs.
2448 if (!extent_buffer_uptodate(tmp))
2449 ret = -EIO;
2450 free_extent_buffer(tmp);
2451 } else {
2452 ret = PTR_ERR(tmp);
2455 btrfs_release_path(p);
2456 return ret;
2460 * helper function for btrfs_search_slot. This does all of the checks
2461 * for node-level blocks and does any balancing required based on
2462 * the ins_len.
2464 * If no extra work was required, zero is returned. If we had to
2465 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2466 * start over
2468 static int
2469 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2470 struct btrfs_root *root, struct btrfs_path *p,
2471 struct extent_buffer *b, int level, int ins_len,
2472 int *write_lock_level)
2474 struct btrfs_fs_info *fs_info = root->fs_info;
2475 int ret;
2477 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2478 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2479 int sret;
2481 if (*write_lock_level < level + 1) {
2482 *write_lock_level = level + 1;
2483 btrfs_release_path(p);
2484 goto again;
2487 btrfs_set_path_blocking(p);
2488 reada_for_balance(fs_info, p, level);
2489 sret = split_node(trans, root, p, level);
2490 btrfs_clear_path_blocking(p, NULL, 0);
2492 BUG_ON(sret > 0);
2493 if (sret) {
2494 ret = sret;
2495 goto done;
2497 b = p->nodes[level];
2498 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2499 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2500 int sret;
2502 if (*write_lock_level < level + 1) {
2503 *write_lock_level = level + 1;
2504 btrfs_release_path(p);
2505 goto again;
2508 btrfs_set_path_blocking(p);
2509 reada_for_balance(fs_info, p, level);
2510 sret = balance_level(trans, root, p, level);
2511 btrfs_clear_path_blocking(p, NULL, 0);
2513 if (sret) {
2514 ret = sret;
2515 goto done;
2517 b = p->nodes[level];
2518 if (!b) {
2519 btrfs_release_path(p);
2520 goto again;
2522 BUG_ON(btrfs_header_nritems(b) == 1);
2524 return 0;
2526 again:
2527 ret = -EAGAIN;
2528 done:
2529 return ret;
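/*
 * Illustrative sketch (invented name): the two occupancy triggers checked
 * by setup_nodes_for_search() above, with capacity being the node pointer
 * capacity (BTRFS_NODEPTRS_PER_BLOCK). Inserting descents split
 * nearly-full nodes; deleting descents rebalance nodes below half full.
 */
static int example_node_action(u32 nritems, u32 capacity, int ins_len)
{
        if (ins_len > 0 && nritems >= capacity - 3)
                return 1;       /* nearly full: split before descending */
        if (ins_len < 0 && nritems < capacity / 2)
                return -1;      /* under half full: balance the level */
        return 0;               /* healthy: no extra work */
}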
2532 static void key_search_validate(struct extent_buffer *b,
2533 const struct btrfs_key *key,
2534 int level)
2536 #ifdef CONFIG_BTRFS_ASSERT
2537 struct btrfs_disk_key disk_key;
2539 btrfs_cpu_key_to_disk(&disk_key, key);
2541 if (level == 0)
2542 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2543 offsetof(struct btrfs_leaf, items[0].key),
2544 sizeof(disk_key)));
2545 else
2546 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2547 offsetof(struct btrfs_node, ptrs[0].key),
2548 sizeof(disk_key)));
2549 #endif
2552 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2553 int level, int *prev_cmp, int *slot)
2555 if (*prev_cmp != 0) {
2556 *prev_cmp = btrfs_bin_search(b, key, level, slot);
2557 return *prev_cmp;
2560 key_search_validate(b, key, level);
2561 *slot = 0;
2563 return 0;
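/*
 * Illustrative sketch (invented name; plain u64 keys stand in for struct
 * btrfs_key): the contract btrfs_bin_search() provides to key_search()
 * above. Returns 0 with *slot at the match when the key exists, or 1 with
 * *slot at the position where the key would be inserted.
 */
static int example_bin_search(const u64 *keys, int nritems, u64 key,
                              int *slot)
{
        int lo = 0, hi = nritems;

        while (lo < hi) {
                int mid = lo + (hi - lo) / 2;

                if (keys[mid] < key)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        *slot = lo;
        return (lo < nritems && keys[lo] == key) ? 0 : 1;
}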
2566 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2567 u64 iobjectid, u64 ioff, u8 key_type,
2568 struct btrfs_key *found_key)
2570 int ret;
2571 struct btrfs_key key;
2572 struct extent_buffer *eb;
2574 ASSERT(path);
2575 ASSERT(found_key);
2577 key.type = key_type;
2578 key.objectid = iobjectid;
2579 key.offset = ioff;
2581 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2582 if (ret < 0)
2583 return ret;
2585 eb = path->nodes[0];
2586 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2587 ret = btrfs_next_leaf(fs_root, path);
2588 if (ret)
2589 return ret;
2590 eb = path->nodes[0];
2593 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2594 if (found_key->type != key.type ||
2595 found_key->objectid != key.objectid)
2596 return 1;
2598 return 0;
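/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): a one-shot lookup through btrfs_find_item(). The path borrows
 * locks and extent buffer references, so it must be freed afterwards.
 */
static int __maybe_unused example_find(struct btrfs_root *fs_root,
                                       u64 objectid, u8 type, u64 offset)
{
        struct btrfs_path *path;
        struct btrfs_key found;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_find_item(fs_root, path, objectid, offset, type, &found);
        /* ret == 0: 'found' mirrors the key at path->slots[0] */
        btrfs_free_path(path);
        return ret;
}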
2601 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2602 struct btrfs_path *p,
2603 int write_lock_level)
2605 struct btrfs_fs_info *fs_info = root->fs_info;
2606 struct extent_buffer *b;
2607 int root_lock;
2608 int level = 0;
2610 /* We try very hard to do read locks on the root */
2611 root_lock = BTRFS_READ_LOCK;
2613 if (p->search_commit_root) {
2614 /* The commit roots are read only so we always do read locks */
2615 if (p->need_commit_sem)
2616 down_read(&fs_info->commit_root_sem);
2617 b = root->commit_root;
2618 extent_buffer_get(b);
2619 level = btrfs_header_level(b);
2620 if (p->need_commit_sem)
2621 up_read(&fs_info->commit_root_sem);
2623 * Ensure that all callers have set skip_locking when
2624 * p->search_commit_root = 1.
2626 ASSERT(p->skip_locking == 1);
2628 goto out;
2631 if (p->skip_locking) {
2632 b = btrfs_root_node(root);
2633 level = btrfs_header_level(b);
2634 goto out;
2638 * If the level is set to maximum, we can skip trying to get the read
2639 * lock.
2641 if (write_lock_level < BTRFS_MAX_LEVEL) {
2643 * We don't know the level of the root node until we actually
2644 * have it read locked
2646 b = btrfs_read_lock_root_node(root);
2647 level = btrfs_header_level(b);
2648 if (level > write_lock_level)
2649 goto out;
2651 /* Whoops, must trade for write lock */
2652 btrfs_tree_read_unlock(b);
2653 free_extent_buffer(b);
2656 b = btrfs_lock_root_node(root);
2657 root_lock = BTRFS_WRITE_LOCK;
2659 /* The level might have changed, check again */
2660 level = btrfs_header_level(b);
2662 out:
2663 p->nodes[level] = b;
2664 if (!p->skip_locking)
2665 p->locks[level] = root_lock;
2667 * Callers are responsible for dropping b's references.
2669 return b;
2674 * btrfs_search_slot - look for a key in a tree and perform necessary
2675 * modifications to preserve tree invariants.
2677 * @trans: Handle of transaction, used when modifying the tree
2678 * @root: The root node of the tree
2679 * @p: Holds all btree nodes along the search path
2680 * @key: The key we are looking for
2681 * @ins_len: Indicates purpose of search: for inserts it is 1, for
2682 * deletions it's -1, 0 for plain searches
2683 * @cow: boolean indicating whether CoW operations should be
2684 * performed. Must always be 1 when modifying the tree.
2686 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2687 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2689 * If @key is found, 0 is returned and you can find the item in the leaf level
2690 * of the path (level 0)
2692 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2693 * points to the slot where it should be inserted
2695 * If an error is encountered while searching the tree a negative error number
2696 * is returned
2698 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2699 const struct btrfs_key *key, struct btrfs_path *p,
2700 int ins_len, int cow)
2702 struct btrfs_fs_info *fs_info = root->fs_info;
2703 struct extent_buffer *b;
2704 int slot;
2705 int ret;
2706 int err;
2707 int level;
2708 int lowest_unlock = 1;
2709 /* everything at write_lock_level or lower must be write locked */
2710 int write_lock_level = 0;
2711 u8 lowest_level = 0;
2712 int min_write_lock_level;
2713 int prev_cmp;
2715 lowest_level = p->lowest_level;
2716 WARN_ON(lowest_level && ins_len > 0);
2717 WARN_ON(p->nodes[0] != NULL);
2718 BUG_ON(!cow && ins_len);
2720 if (ins_len < 0) {
2721 lowest_unlock = 2;
2723 /* when we are removing items, we might have to go up to level
2724 * two as we update tree pointers. Make sure we keep write
2725 * locks for those levels as well
2727 write_lock_level = 2;
2728 } else if (ins_len > 0) {
2730 * for inserting items, make sure we have a write lock on
2731 * level 1 so we can update keys
2733 write_lock_level = 1;
2736 if (!cow)
2737 write_lock_level = -1;
2739 if (cow && (p->keep_locks || p->lowest_level))
2740 write_lock_level = BTRFS_MAX_LEVEL;
2742 min_write_lock_level = write_lock_level;
2744 again:
2745 prev_cmp = -1;
2746 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2748 while (b) {
2749 level = btrfs_header_level(b);
2752 * setup the path here so we can release it under lock
2753 * contention with the cow code
2755 if (cow) {
2756 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2759 * if we don't really need to cow this block
2760 * then we don't want to set the path blocking,
2761 * so we test it here
2763 if (!should_cow_block(trans, root, b)) {
2764 trans->dirty = true;
2765 goto cow_done;
2769 * must have write locks on this node and the
2770 * parent
2772 if (level > write_lock_level ||
2773 (level + 1 > write_lock_level &&
2774 level + 1 < BTRFS_MAX_LEVEL &&
2775 p->nodes[level + 1])) {
2776 write_lock_level = level + 1;
2777 btrfs_release_path(p);
2778 goto again;
2781 btrfs_set_path_blocking(p);
2782 if (last_level)
2783 err = btrfs_cow_block(trans, root, b, NULL, 0,
2784 &b);
2785 else
2786 err = btrfs_cow_block(trans, root, b,
2787 p->nodes[level + 1],
2788 p->slots[level + 1], &b);
2789 if (err) {
2790 ret = err;
2791 goto done;
2794 cow_done:
2795 p->nodes[level] = b;
2796 btrfs_clear_path_blocking(p, NULL, 0);
2799 * we have a lock on b and as long as we aren't changing
2800 * the tree, there is no way for the items in b to change.
2801 * It is safe to drop the lock on our parent before we
2802 * go through the expensive btree search on b.
2804 * If we're inserting or deleting (ins_len != 0), then we might
2805 * be changing slot zero, which may require changing the parent.
2806 * So, we can't drop the lock until after we know which slot
2807 * we're operating on.
2809 if (!ins_len && !p->keep_locks) {
2810 int u = level + 1;
2812 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2813 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2814 p->locks[u] = 0;
2818 ret = key_search(b, key, level, &prev_cmp, &slot);
2819 if (ret < 0)
2820 goto done;
2822 if (level != 0) {
2823 int dec = 0;
2824 if (ret && slot > 0) {
2825 dec = 1;
2826 slot -= 1;
2828 p->slots[level] = slot;
2829 err = setup_nodes_for_search(trans, root, p, b, level,
2830 ins_len, &write_lock_level);
2831 if (err == -EAGAIN)
2832 goto again;
2833 if (err) {
2834 ret = err;
2835 goto done;
2837 b = p->nodes[level];
2838 slot = p->slots[level];
2841 * slot 0 is special: if we change the key
2842 * we have to update the parent pointer
2843 * which means we must have a write lock
2844 * on the parent
2846 if (slot == 0 && ins_len &&
2847 write_lock_level < level + 1) {
2848 write_lock_level = level + 1;
2849 btrfs_release_path(p);
2850 goto again;
2853 unlock_up(p, level, lowest_unlock,
2854 min_write_lock_level, &write_lock_level);
2856 if (level == lowest_level) {
2857 if (dec)
2858 p->slots[level]++;
2859 goto done;
2862 err = read_block_for_search(root, p, &b, level,
2863 slot, key);
2864 if (err == -EAGAIN)
2865 goto again;
2866 if (err) {
2867 ret = err;
2868 goto done;
2871 if (!p->skip_locking) {
2872 level = btrfs_header_level(b);
2873 if (level <= write_lock_level) {
2874 err = btrfs_try_tree_write_lock(b);
2875 if (!err) {
2876 btrfs_set_path_blocking(p);
2877 btrfs_tree_lock(b);
2878 btrfs_clear_path_blocking(p, b,
2879 BTRFS_WRITE_LOCK);
2881 p->locks[level] = BTRFS_WRITE_LOCK;
2882 } else {
2883 err = btrfs_tree_read_lock_atomic(b);
2884 if (!err) {
2885 btrfs_set_path_blocking(p);
2886 btrfs_tree_read_lock(b);
2887 btrfs_clear_path_blocking(p, b,
2888 BTRFS_READ_LOCK);
2890 p->locks[level] = BTRFS_READ_LOCK;
2892 p->nodes[level] = b;
2894 } else {
2895 p->slots[level] = slot;
2896 if (ins_len > 0 &&
2897 btrfs_leaf_free_space(fs_info, b) < ins_len) {
2898 if (write_lock_level < 1) {
2899 write_lock_level = 1;
2900 btrfs_release_path(p);
2901 goto again;
2904 btrfs_set_path_blocking(p);
2905 err = split_leaf(trans, root, key,
2906 p, ins_len, ret == 0);
2907 btrfs_clear_path_blocking(p, NULL, 0);
2909 BUG_ON(err > 0);
2910 if (err) {
2911 ret = err;
2912 goto done;
2915 if (!p->search_for_split)
2916 unlock_up(p, level, lowest_unlock,
2917 min_write_lock_level, &write_lock_level);
2918 goto done;
2921 ret = 1;
2922 done:
2924 * we don't really know what they plan on doing with the path
2925 * from here on, so for now just mark it as blocking
2927 if (!p->leave_spinning)
2928 btrfs_set_path_blocking(p);
2929 if (ret < 0 && !p->skip_release_on_error)
2930 btrfs_release_path(p);
2931 return ret;
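/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): the plain read-only form of btrfs_search_slot(). No transaction,
 * no cow, ins_len of 0; the caller only inspects the resulting slot.
 */
static int __maybe_unused example_lookup(struct btrfs_root *root,
                                         const struct btrfs_key *key)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* trans = NULL, ins_len = 0, cow = 0: pure read-only search */
        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret == 0) {
                /* found: the item is at path->nodes[0], path->slots[0] */
        }
        /* ret == 1: not found, the slot marks the insert position */
        btrfs_free_path(path);  /* drops locks and extent buffer refs */
        return ret;
}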
2935 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2936 * current state of the tree together with the operations recorded in the tree
2937 * modification log to search for the key in a previous version of this tree, as
2938 * denoted by the time_seq parameter.
2940 * Naturally, there is no support for insert, delete or cow operations.
2942 * The resulting path and return value will be set up as if we called
2943 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2945 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2946 struct btrfs_path *p, u64 time_seq)
2948 struct btrfs_fs_info *fs_info = root->fs_info;
2949 struct extent_buffer *b;
2950 int slot;
2951 int ret;
2952 int err;
2953 int level;
2954 int lowest_unlock = 1;
2955 u8 lowest_level = 0;
2956 int prev_cmp = -1;
2958 lowest_level = p->lowest_level;
2959 WARN_ON(p->nodes[0] != NULL);
2961 if (p->search_commit_root) {
2962 BUG_ON(time_seq);
2963 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2966 again:
2967 b = get_old_root(root, time_seq);
2968 level = btrfs_header_level(b);
2969 p->locks[level] = BTRFS_READ_LOCK;
2971 while (b) {
2972 level = btrfs_header_level(b);
2973 p->nodes[level] = b;
2974 btrfs_clear_path_blocking(p, NULL, 0);
2977 * we have a lock on b and as long as we aren't changing
2978 * the tree, there is no way for the items in b to change.
2979 * It is safe to drop the lock on our parent before we
2980 * go through the expensive btree search on b.
2982 btrfs_unlock_up_safe(p, level + 1);
2985 * Since we can unwind extent buffers we want to do a real
2986 * search every time.
2988 prev_cmp = -1;
2989 ret = key_search(b, key, level, &prev_cmp, &slot);
2991 if (level != 0) {
2992 int dec = 0;
2993 if (ret && slot > 0) {
2994 dec = 1;
2995 slot -= 1;
2997 p->slots[level] = slot;
2998 unlock_up(p, level, lowest_unlock, 0, NULL);
3000 if (level == lowest_level) {
3001 if (dec)
3002 p->slots[level]++;
3003 goto done;
3006 err = read_block_for_search(root, p, &b, level,
3007 slot, key);
3008 if (err == -EAGAIN)
3009 goto again;
3010 if (err) {
3011 ret = err;
3012 goto done;
3015 level = btrfs_header_level(b);
3016 err = btrfs_tree_read_lock_atomic(b);
3017 if (!err) {
3018 btrfs_set_path_blocking(p);
3019 btrfs_tree_read_lock(b);
3020 btrfs_clear_path_blocking(p, b,
3021 BTRFS_READ_LOCK);
3023 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3024 if (!b) {
3025 ret = -ENOMEM;
3026 goto done;
3028 p->locks[level] = BTRFS_READ_LOCK;
3029 p->nodes[level] = b;
3030 } else {
3031 p->slots[level] = slot;
3032 unlock_up(p, level, lowest_unlock, 0, NULL);
3033 goto done;
3036 ret = 1;
3037 done:
3038 if (!p->leave_spinning)
3039 btrfs_set_path_blocking(p);
3040 if (ret < 0)
3041 btrfs_release_path(p);
3043 return ret;
3047 * helper to use instead of search slot if no exact match is needed but
3048 * instead the next or previous item should be returned.
3049 * When find_higher is true, the next higher item is returned, the next lower
3050 * otherwise.
3051 * When return_any and find_higher are both true, and no higher item is found,
3052 * return the next lower instead.
3053 * When return_any is true and find_higher is false, and no lower item is found,
3054 * return the next higher instead.
3055 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3056 * < 0 on error
3058 int btrfs_search_slot_for_read(struct btrfs_root *root,
3059 const struct btrfs_key *key,
3060 struct btrfs_path *p, int find_higher,
3061 int return_any)
3063 int ret;
3064 struct extent_buffer *leaf;
3066 again:
3067 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3068 if (ret <= 0)
3069 return ret;
3071 * a return value of 1 means the path is at the position where the
3072 * item should be inserted. Normally this is the next bigger item,
3073 * but in case the previous item is the last in a leaf, path points
3074 * to the first free slot in the previous leaf, i.e. at an invalid
3075 * item.
3077 leaf = p->nodes[0];
3079 if (find_higher) {
3080 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3081 ret = btrfs_next_leaf(root, p);
3082 if (ret <= 0)
3083 return ret;
3084 if (!return_any)
3085 return 1;
3087 * no higher item found, return the next
3088 * lower instead
3090 return_any = 0;
3091 find_higher = 0;
3092 btrfs_release_path(p);
3093 goto again;
3095 } else {
3096 if (p->slots[0] == 0) {
3097 ret = btrfs_prev_leaf(root, p);
3098 if (ret < 0)
3099 return ret;
3100 if (!ret) {
3101 leaf = p->nodes[0];
3102 if (p->slots[0] == btrfs_header_nritems(leaf))
3103 p->slots[0]--;
3104 return 0;
3106 if (!return_any)
3107 return 1;
3109 * no lower item found, return the next
3110 * higher instead
3112 return_any = 0;
3113 find_higher = 1;
3114 btrfs_release_path(p);
3115 goto again;
3116 } else {
3117 --p->slots[0];
3120 return 0;
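/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): finding the nearest item to a key with the helper above.
 * find_higher = 1 asks for the next higher item on a miss; return_any = 1
 * falls back to the next lower one if nothing higher exists.
 */
static int __maybe_unused example_find_nearest(struct btrfs_root *root,
                                               const struct btrfs_key *key,
                                               struct btrfs_key *nearest)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
        if (ret == 0)
                btrfs_item_key_to_cpu(path->nodes[0], nearest,
                                      path->slots[0]);
        btrfs_free_path(path);
        return ret;
}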
3124 * adjust the pointers going up the tree, starting at level,
3125 * making sure the right key of each node points to 'key'.
3126 * This is used after shifting pointers to the left, so it stops
3127 * fixing up pointers when a given leaf/node is not in slot 0 of the
3128 * higher levels
3131 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3132 struct btrfs_path *path,
3133 struct btrfs_disk_key *key, int level)
3135 int i;
3136 struct extent_buffer *t;
3137 int ret;
3139 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3140 int tslot = path->slots[i];
3142 if (!path->nodes[i])
3143 break;
3144 t = path->nodes[i];
3145 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3146 GFP_ATOMIC);
3147 BUG_ON(ret < 0);
3148 btrfs_set_node_key(t, key, tslot);
3149 btrfs_mark_buffer_dirty(path->nodes[i]);
3150 if (tslot != 0)
3151 break;
3156 * update item key.
3158 * This function isn't completely safe. It's the caller's responsibility
3159 * to ensure that the new key won't break the order
3161 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3162 struct btrfs_path *path,
3163 const struct btrfs_key *new_key)
3165 struct btrfs_disk_key disk_key;
3166 struct extent_buffer *eb;
3167 int slot;
3169 eb = path->nodes[0];
3170 slot = path->slots[0];
3171 if (slot > 0) {
3172 btrfs_item_key(eb, &disk_key, slot - 1);
3173 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3175 if (slot < btrfs_header_nritems(eb) - 1) {
3176 btrfs_item_key(eb, &disk_key, slot + 1);
3177 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3180 btrfs_cpu_key_to_disk(&disk_key, new_key);
3181 btrfs_set_item_key(eb, &disk_key, slot);
3182 btrfs_mark_buffer_dirty(eb);
3183 if (slot == 0)
3184 fixup_low_keys(fs_info, path, &disk_key, 1);
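/*
 * Illustrative sketch (invented name; a single u64 stands in for the full
 * (objectid, type, offset) comparison done by comp_keys()): the ordering
 * invariant btrfs_set_item_key_safe() asserts above. Replacing the key in
 * slot s is only legal if the leaf stays sorted around it.
 */
static int example_key_change_ok(u64 prev, u64 next, u64 new_key,
                                 int has_prev, int has_next)
{
        if (has_prev && prev >= new_key)
                return 0;       /* would break ordering with slot s - 1 */
        if (has_next && next <= new_key)
                return 0;       /* would break ordering with slot s + 1 */
        return 1;
}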
3188 * try to push data from one node into the next node left in the
3189 * tree.
3191 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3192 * error, and > 0 if there was no room in the left hand block.
3194 static int push_node_left(struct btrfs_trans_handle *trans,
3195 struct btrfs_fs_info *fs_info,
3196 struct extent_buffer *dst,
3197 struct extent_buffer *src, int empty)
3199 int push_items = 0;
3200 int src_nritems;
3201 int dst_nritems;
3202 int ret = 0;
3204 src_nritems = btrfs_header_nritems(src);
3205 dst_nritems = btrfs_header_nritems(dst);
3206 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3207 WARN_ON(btrfs_header_generation(src) != trans->transid);
3208 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3210 if (!empty && src_nritems <= 8)
3211 return 1;
3213 if (push_items <= 0)
3214 return 1;
3216 if (empty) {
3217 push_items = min(src_nritems, push_items);
3218 if (push_items < src_nritems) {
3219 /* leave at least 8 pointers in the node if
3220 * we aren't going to empty it
3222 if (src_nritems - push_items < 8) {
3223 if (push_items <= 8)
3224 return 1;
3225 push_items -= 8;
3228 } else
3229 push_items = min(src_nritems - 8, push_items);
3231 ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3232 push_items);
3233 if (ret) {
3234 btrfs_abort_transaction(trans, ret);
3235 return ret;
3237 copy_extent_buffer(dst, src,
3238 btrfs_node_key_ptr_offset(dst_nritems),
3239 btrfs_node_key_ptr_offset(0),
3240 push_items * sizeof(struct btrfs_key_ptr));
3242 if (push_items < src_nritems) {
3244 * Don't call tree_mod_log_insert_move here, key removal was
3245 * already fully logged by tree_mod_log_eb_copy above.
3247 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3248 btrfs_node_key_ptr_offset(push_items),
3249 (src_nritems - push_items) *
3250 sizeof(struct btrfs_key_ptr));
3252 btrfs_set_header_nritems(src, src_nritems - push_items);
3253 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3254 btrfs_mark_buffer_dirty(src);
3255 btrfs_mark_buffer_dirty(dst);
3257 return ret;
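/*
 * Illustrative sketch (invented name; the 'empty' mode that deliberately
 * drains the source is left out): how many pointers push_node_left() may
 * move. The destination's free slots bound the transfer, and the source
 * is never drained below 8 pointers.
 */
static int example_push_left_items(u32 src_nritems, u32 dst_nritems,
                                   u32 capacity)
{
        u32 room, movable;

        if (capacity <= dst_nritems || src_nritems <= 8)
                return 0;       /* no room, or source already small */
        room = capacity - dst_nritems;  /* free slots in the destination */
        movable = src_nritems - 8;      /* keep 8 pointers in the source */
        return room < movable ? room : movable;
}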
3261 * try to push data from one node into the next node right in the
3262 * tree.
3264 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3265 * error, and > 0 if there was no room in the right hand block.
3267 * this will only push up to 1/2 the contents of the left node over
3269 static int balance_node_right(struct btrfs_trans_handle *trans,
3270 struct btrfs_fs_info *fs_info,
3271 struct extent_buffer *dst,
3272 struct extent_buffer *src)
3274 int push_items = 0;
3275 int max_push;
3276 int src_nritems;
3277 int dst_nritems;
3278 int ret = 0;
3280 WARN_ON(btrfs_header_generation(src) != trans->transid);
3281 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3283 src_nritems = btrfs_header_nritems(src);
3284 dst_nritems = btrfs_header_nritems(dst);
3285 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3286 if (push_items <= 0)
3287 return 1;
3289 if (src_nritems < 4)
3290 return 1;
3292 max_push = src_nritems / 2 + 1;
3293 /* don't try to empty the node */
3294 if (max_push >= src_nritems)
3295 return 1;
3297 if (max_push < push_items)
3298 push_items = max_push;
3300 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3301 BUG_ON(ret < 0);
3302 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3303 btrfs_node_key_ptr_offset(0),
3304 (dst_nritems) *
3305 sizeof(struct btrfs_key_ptr));
3307 ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3308 src_nritems - push_items, push_items);
3309 if (ret) {
3310 btrfs_abort_transaction(trans, ret);
3311 return ret;
3313 copy_extent_buffer(dst, src,
3314 btrfs_node_key_ptr_offset(0),
3315 btrfs_node_key_ptr_offset(src_nritems - push_items),
3316 push_items * sizeof(struct btrfs_key_ptr));
3318 btrfs_set_header_nritems(src, src_nritems - push_items);
3319 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3321 btrfs_mark_buffer_dirty(src);
3322 btrfs_mark_buffer_dirty(dst);
3324 return ret;
3328 * helper function to insert a new root level in the tree.
3329 * A new node is allocated, and a single item is inserted to
3330 * point to the existing root
3332 * returns zero on success or < 0 on failure.
3334 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3335 struct btrfs_root *root,
3336 struct btrfs_path *path, int level)
3338 struct btrfs_fs_info *fs_info = root->fs_info;
3339 u64 lower_gen;
3340 struct extent_buffer *lower;
3341 struct extent_buffer *c;
3342 struct extent_buffer *old;
3343 struct btrfs_disk_key lower_key;
3344 int ret;
3346 BUG_ON(path->nodes[level]);
3347 BUG_ON(path->nodes[level-1] != root->node);
3349 lower = path->nodes[level-1];
3350 if (level == 1)
3351 btrfs_item_key(lower, &lower_key, 0);
3352 else
3353 btrfs_node_key(lower, &lower_key, 0);
3355 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3356 &lower_key, level, root->node->start, 0);
3357 if (IS_ERR(c))
3358 return PTR_ERR(c);
3360 root_add_used(root, fs_info->nodesize);
3362 memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3363 btrfs_set_header_nritems(c, 1);
3364 btrfs_set_header_level(c, level);
3365 btrfs_set_header_bytenr(c, c->start);
3366 btrfs_set_header_generation(c, trans->transid);
3367 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3368 btrfs_set_header_owner(c, root->root_key.objectid);
3370 write_extent_buffer_fsid(c, fs_info->fsid);
3371 write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
3373 btrfs_set_node_key(c, &lower_key, 0);
3374 btrfs_set_node_blockptr(c, 0, lower->start);
3375 lower_gen = btrfs_header_generation(lower);
3376 WARN_ON(lower_gen != trans->transid);
3378 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3380 btrfs_mark_buffer_dirty(c);
3382 old = root->node;
3383 ret = tree_mod_log_insert_root(root->node, c, 0);
3384 BUG_ON(ret < 0);
3385 rcu_assign_pointer(root->node, c);
3387 /* the super has an extra ref to root->node */
3388 free_extent_buffer(old);
3390 add_root_to_dirty_list(root);
3391 extent_buffer_get(c);
3392 path->nodes[level] = c;
3393 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3394 path->slots[level] = 0;
3395 return 0;
3399 * worker function to insert a single pointer in a node.
3400 * the node should have enough room for the pointer already
3402 * slot and level indicate where you want the key to go, and
3403 * blocknr is the block the key points to.
3405 static void insert_ptr(struct btrfs_trans_handle *trans,
3406 struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3407 struct btrfs_disk_key *key, u64 bytenr,
3408 int slot, int level)
3410 struct extent_buffer *lower;
3411 int nritems;
3412 int ret;
3414 BUG_ON(!path->nodes[level]);
3415 btrfs_assert_tree_locked(path->nodes[level]);
3416 lower = path->nodes[level];
3417 nritems = btrfs_header_nritems(lower);
3418 BUG_ON(slot > nritems);
3419 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3420 if (slot != nritems) {
3421 if (level) {
3422 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3423 nritems - slot);
3424 BUG_ON(ret < 0);
3426 memmove_extent_buffer(lower,
3427 btrfs_node_key_ptr_offset(slot + 1),
3428 btrfs_node_key_ptr_offset(slot),
3429 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3431 if (level) {
3432 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3433 GFP_NOFS);
3434 BUG_ON(ret < 0);
3436 btrfs_set_node_key(lower, key, slot);
3437 btrfs_set_node_blockptr(lower, slot, bytenr);
3438 WARN_ON(trans->transid == 0);
3439 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3440 btrfs_set_header_nritems(lower, nritems + 1);
3441 btrfs_mark_buffer_dirty(lower);
3445 * split the node at the specified level in path in two.
3446 * The path is corrected to point to the appropriate node after the split
3448 * Before splitting this tries to make some room in the node by pushing
3449 * left and right, if either one works, it returns right away.
3451 * returns 0 on success and < 0 on failure
3453 static noinline int split_node(struct btrfs_trans_handle *trans,
3454 struct btrfs_root *root,
3455 struct btrfs_path *path, int level)
3457 struct btrfs_fs_info *fs_info = root->fs_info;
3458 struct extent_buffer *c;
3459 struct extent_buffer *split;
3460 struct btrfs_disk_key disk_key;
3461 int mid;
3462 int ret;
3463 u32 c_nritems;
3465 c = path->nodes[level];
3466 WARN_ON(btrfs_header_generation(c) != trans->transid);
3467 if (c == root->node) {
3469 * trying to split the root, let's make a new one
3471 * tree mod log: We don't log removal of the old root in
3472 * insert_new_root, because that root buffer will be kept as a
3473 * normal node. We are going to log removal of half of the
3474 * elements below with tree_mod_log_eb_copy. We're holding a
3475 * tree lock on the buffer, which is why we cannot race with
3476 * other tree_mod_log users.
3478 ret = insert_new_root(trans, root, path, level + 1);
3479 if (ret)
3480 return ret;
3481 } else {
3482 ret = push_nodes_for_insert(trans, root, path, level);
3483 c = path->nodes[level];
3484 if (!ret && btrfs_header_nritems(c) <
3485 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3486 return 0;
3487 if (ret < 0)
3488 return ret;
3491 c_nritems = btrfs_header_nritems(c);
3492 mid = (c_nritems + 1) / 2;
3493 btrfs_node_key(c, &disk_key, mid);
3495 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3496 &disk_key, level, c->start, 0);
3497 if (IS_ERR(split))
3498 return PTR_ERR(split);
3500 root_add_used(root, fs_info->nodesize);
3502 memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3503 btrfs_set_header_level(split, btrfs_header_level(c));
3504 btrfs_set_header_bytenr(split, split->start);
3505 btrfs_set_header_generation(split, trans->transid);
3506 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3507 btrfs_set_header_owner(split, root->root_key.objectid);
3508 write_extent_buffer_fsid(split, fs_info->fsid);
3509 write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
3511 ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3512 if (ret) {
3513 btrfs_abort_transaction(trans, ret);
3514 return ret;
3516 copy_extent_buffer(split, c,
3517 btrfs_node_key_ptr_offset(0),
3518 btrfs_node_key_ptr_offset(mid),
3519 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3520 btrfs_set_header_nritems(split, c_nritems - mid);
3521 btrfs_set_header_nritems(c, mid);
3522 ret = 0;
3524 btrfs_mark_buffer_dirty(c);
3525 btrfs_mark_buffer_dirty(split);
3527 insert_ptr(trans, fs_info, path, &disk_key, split->start,
3528 path->slots[level + 1] + 1, level + 1);
3530 if (path->slots[level] >= mid) {
3531 path->slots[level] -= mid;
3532 btrfs_tree_unlock(c);
3533 free_extent_buffer(c);
3534 path->nodes[level] = split;
3535 path->slots[level + 1] += 1;
3536 } else {
3537 btrfs_tree_unlock(split);
3538 free_extent_buffer(split);
3540 return ret;
3544 * how many bytes are required to store the items in a leaf. start
3545 * and nr indicate which items in the leaf to check. This totals up the
3546 * space used both by the item structs and the item data
3548 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3550 struct btrfs_item *start_item;
3551 struct btrfs_item *end_item;
3552 struct btrfs_map_token token;
3553 int data_len;
3554 int nritems = btrfs_header_nritems(l);
3555 int end = min(nritems, start + nr) - 1;
3557 if (!nr)
3558 return 0;
3559 btrfs_init_map_token(&token);
3560 start_item = btrfs_item_nr(start);
3561 end_item = btrfs_item_nr(end);
3562 data_len = btrfs_token_item_offset(l, start_item, &token) +
3563 btrfs_token_item_size(l, start_item, &token);
3564 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3565 data_len += sizeof(struct btrfs_item) * nr;
3566 WARN_ON(data_len < 0);
3567 return data_len;
3571 * The space between the end of the leaf items and
3572 * the start of the leaf data. IOW, how much room
3573 * the leaf has left for both items and data
3575 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3576 struct extent_buffer *leaf)
3578 int nritems = btrfs_header_nritems(leaf);
3579 int ret;
3581 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3582 if (ret < 0) {
3583 btrfs_crit(fs_info,
3584 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3585 ret,
3586 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3587 leaf_space_used(leaf, 0, nritems), nritems);
3589 return ret;
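/*
 * Illustrative sketch (invented name): the leaf layout arithmetic behind
 * leaf_space_used() and btrfs_leaf_free_space() above. Item headers grow
 * forward from the front of the leaf while item data grows backward from
 * the end, so free space is whatever remains between the two regions.
 */
static int example_leaf_free(u32 leaf_data_size, u32 nritems,
                             u32 item_hdr_size, u32 total_data_bytes)
{
        u32 used = nritems * item_hdr_size + total_data_bytes;

        return (int)(leaf_data_size - used);    /* negative means corruption */
}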
3593 * min slot controls the lowest index we're willing to push to the
3594 * right. We'll push up to and including min_slot, but no lower
3596 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3597 struct btrfs_path *path,
3598 int data_size, int empty,
3599 struct extent_buffer *right,
3600 int free_space, u32 left_nritems,
3601 u32 min_slot)
3603 struct extent_buffer *left = path->nodes[0];
3604 struct extent_buffer *upper = path->nodes[1];
3605 struct btrfs_map_token token;
3606 struct btrfs_disk_key disk_key;
3607 int slot;
3608 u32 i;
3609 int push_space = 0;
3610 int push_items = 0;
3611 struct btrfs_item *item;
3612 u32 nr;
3613 u32 right_nritems;
3614 u32 data_end;
3615 u32 this_item_size;
3617 btrfs_init_map_token(&token);
3619 if (empty)
3620 nr = 0;
3621 else
3622 nr = max_t(u32, 1, min_slot);
3624 if (path->slots[0] >= left_nritems)
3625 push_space += data_size;
3627 slot = path->slots[1];
3628 i = left_nritems - 1;
3629 while (i >= nr) {
3630 item = btrfs_item_nr(i);
3632 if (!empty && push_items > 0) {
3633 if (path->slots[0] > i)
3634 break;
3635 if (path->slots[0] == i) {
3636 int space = btrfs_leaf_free_space(fs_info, left);
3637 if (space + push_space * 2 > free_space)
3638 break;
3642 if (path->slots[0] == i)
3643 push_space += data_size;
3645 this_item_size = btrfs_item_size(left, item);
3646 if (this_item_size + sizeof(*item) + push_space > free_space)
3647 break;
3649 push_items++;
3650 push_space += this_item_size + sizeof(*item);
3651 if (i == 0)
3652 break;
3653 i--;
3656 if (push_items == 0)
3657 goto out_unlock;
3659 WARN_ON(!empty && push_items == left_nritems);
3661 /* push left to right */
3662 right_nritems = btrfs_header_nritems(right);
3664 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3665 push_space -= leaf_data_end(fs_info, left);
3667 /* make room in the right data area */
3668 data_end = leaf_data_end(fs_info, right);
3669 memmove_extent_buffer(right,
3670 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3671 BTRFS_LEAF_DATA_OFFSET + data_end,
3672 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3674 /* copy from the left data area */
3675 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3676 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3677 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3678 push_space);
3680 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3681 btrfs_item_nr_offset(0),
3682 right_nritems * sizeof(struct btrfs_item));
3684 /* copy the items from left to right */
3685 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3686 btrfs_item_nr_offset(left_nritems - push_items),
3687 push_items * sizeof(struct btrfs_item));
3689 /* update the item pointers */
3690 right_nritems += push_items;
3691 btrfs_set_header_nritems(right, right_nritems);
3692 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3693 for (i = 0; i < right_nritems; i++) {
3694 item = btrfs_item_nr(i);
3695 push_space -= btrfs_token_item_size(right, item, &token);
3696 btrfs_set_token_item_offset(right, item, push_space, &token);
3699 left_nritems -= push_items;
3700 btrfs_set_header_nritems(left, left_nritems);
3702 if (left_nritems)
3703 btrfs_mark_buffer_dirty(left);
3704 else
3705 clean_tree_block(fs_info, left);
3707 btrfs_mark_buffer_dirty(right);
3709 btrfs_item_key(right, &disk_key, 0);
3710 btrfs_set_node_key(upper, &disk_key, slot + 1);
3711 btrfs_mark_buffer_dirty(upper);
3713 /* then fixup the leaf pointer in the path */
3714 if (path->slots[0] >= left_nritems) {
3715 path->slots[0] -= left_nritems;
3716 if (btrfs_header_nritems(path->nodes[0]) == 0)
3717 clean_tree_block(fs_info, path->nodes[0]);
3718 btrfs_tree_unlock(path->nodes[0]);
3719 free_extent_buffer(path->nodes[0]);
3720 path->nodes[0] = right;
3721 path->slots[1] += 1;
3722 } else {
3723 btrfs_tree_unlock(right);
3724 free_extent_buffer(right);
3726 return 0;
3728 out_unlock:
3729 btrfs_tree_unlock(right);
3730 free_extent_buffer(right);
3731 return 1;
3735 * push some data in the path leaf to the right, trying to free up at
3736 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3738 * returns 1 if the push failed because the other node didn't have enough
3739 * room, 0 if everything worked out and < 0 if there were major errors.
3741 * this will push starting from min_slot to the end of the leaf. It won't
3742 * push any slot lower than min_slot
3744 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3745 *root, struct btrfs_path *path,
3746 int min_data_size, int data_size,
3747 int empty, u32 min_slot)
3749 struct btrfs_fs_info *fs_info = root->fs_info;
3750 struct extent_buffer *left = path->nodes[0];
3751 struct extent_buffer *right;
3752 struct extent_buffer *upper;
3753 int slot;
3754 int free_space;
3755 u32 left_nritems;
3756 int ret;
3758 if (!path->nodes[1])
3759 return 1;
3761 slot = path->slots[1];
3762 upper = path->nodes[1];
3763 if (slot >= btrfs_header_nritems(upper) - 1)
3764 return 1;
3766 btrfs_assert_tree_locked(path->nodes[1]);
3768 right = read_node_slot(fs_info, upper, slot + 1);
3770 * If slot + 1 is not valid or we fail to read the right node,
3771 * no big deal, just return.
3773 if (IS_ERR(right))
3774 return 1;
3776 btrfs_tree_lock(right);
3777 btrfs_set_lock_blocking(right);
3779 free_space = btrfs_leaf_free_space(fs_info, right);
3780 if (free_space < data_size)
3781 goto out_unlock;
3783 /* cow and double check */
3784 ret = btrfs_cow_block(trans, root, right, upper,
3785 slot + 1, &right);
3786 if (ret)
3787 goto out_unlock;
3789 free_space = btrfs_leaf_free_space(fs_info, right);
3790 if (free_space < data_size)
3791 goto out_unlock;
3793 left_nritems = btrfs_header_nritems(left);
3794 if (left_nritems == 0)
3795 goto out_unlock;
3797 if (path->slots[0] == left_nritems && !empty) {
3798 /* Key greater than all keys in the leaf, right neighbor has
3799 * enough room for it and we're not emptying our leaf to delete
3800 * it, therefore use right neighbor to insert the new item and
3801 * no need to touch/dirty our left leaf. */
3802 btrfs_tree_unlock(left);
3803 free_extent_buffer(left);
3804 path->nodes[0] = right;
3805 path->slots[0] = 0;
3806 path->slots[1]++;
3807 return 0;
3810 return __push_leaf_right(fs_info, path, min_data_size, empty,
3811 right, free_space, left_nritems, min_slot);
3812 out_unlock:
3813 btrfs_tree_unlock(right);
3814 free_extent_buffer(right);
3815 return 1;
3819 * push some data in the path leaf to the left, trying to free up at
3820 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3822 * max_slot can put a limit on how far into the leaf we'll push items. The
3823 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3824 * items
3826 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3827 struct btrfs_path *path, int data_size,
3828 int empty, struct extent_buffer *left,
3829 int free_space, u32 right_nritems,
3830 u32 max_slot)
3832 struct btrfs_disk_key disk_key;
3833 struct extent_buffer *right = path->nodes[0];
3834 int i;
3835 int push_space = 0;
3836 int push_items = 0;
3837 struct btrfs_item *item;
3838 u32 old_left_nritems;
3839 u32 nr;
3840 int ret = 0;
3841 u32 this_item_size;
3842 u32 old_left_item_size;
3843 struct btrfs_map_token token;
3845 btrfs_init_map_token(&token);
3847 if (empty)
3848 nr = min(right_nritems, max_slot);
3849 else
3850 nr = min(right_nritems - 1, max_slot);
3852 for (i = 0; i < nr; i++) {
3853 item = btrfs_item_nr(i);
3855 if (!empty && push_items > 0) {
3856 if (path->slots[0] < i)
3857 break;
3858 if (path->slots[0] == i) {
3859 int space = btrfs_leaf_free_space(fs_info, right);
3860 if (space + push_space * 2 > free_space)
3861 break;
3865 if (path->slots[0] == i)
3866 push_space += data_size;
3868 this_item_size = btrfs_item_size(right, item);
3869 if (this_item_size + sizeof(*item) + push_space > free_space)
3870 break;
3872 push_items++;
3873 push_space += this_item_size + sizeof(*item);
3876 if (push_items == 0) {
3877 ret = 1;
3878 goto out;
3880 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3882 /* push data from right to left */
3883 copy_extent_buffer(left, right,
3884 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3885 btrfs_item_nr_offset(0),
3886 push_items * sizeof(struct btrfs_item));
3888 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3889 btrfs_item_offset_nr(right, push_items - 1);
3891 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3892 leaf_data_end(fs_info, left) - push_space,
3893 BTRFS_LEAF_DATA_OFFSET +
3894 btrfs_item_offset_nr(right, push_items - 1),
3895 push_space);
3896 old_left_nritems = btrfs_header_nritems(left);
3897 BUG_ON(old_left_nritems <= 0);
3899 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3900 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3901 u32 ioff;
3903 item = btrfs_item_nr(i);
3905 ioff = btrfs_token_item_offset(left, item, &token);
3906 btrfs_set_token_item_offset(left, item,
3907 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3908 &token);
3910 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3912 /* fixup right node */
3913 if (push_items > right_nritems)
3914 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3915 right_nritems);
3917 if (push_items < right_nritems) {
3918 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3919 leaf_data_end(fs_info, right);
3920 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3921 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3922 BTRFS_LEAF_DATA_OFFSET +
3923 leaf_data_end(fs_info, right), push_space);
3925 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3926 btrfs_item_nr_offset(push_items),
3927 (btrfs_header_nritems(right) - push_items) *
3928 sizeof(struct btrfs_item));
3930 right_nritems -= push_items;
3931 btrfs_set_header_nritems(right, right_nritems);
3932 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3933 for (i = 0; i < right_nritems; i++) {
3934 item = btrfs_item_nr(i);
3936 push_space = push_space - btrfs_token_item_size(right,
3937 item, &token);
3938 btrfs_set_token_item_offset(right, item, push_space, &token);
3941 btrfs_mark_buffer_dirty(left);
3942 if (right_nritems)
3943 btrfs_mark_buffer_dirty(right);
3944 else
3945 clean_tree_block(fs_info, right);
3947 btrfs_item_key(right, &disk_key, 0);
3948 fixup_low_keys(fs_info, path, &disk_key, 1);
3950 /* then fixup the leaf pointer in the path */
3951 if (path->slots[0] < push_items) {
3952 path->slots[0] += old_left_nritems;
3953 btrfs_tree_unlock(path->nodes[0]);
3954 free_extent_buffer(path->nodes[0]);
3955 path->nodes[0] = left;
3956 path->slots[1] -= 1;
3957 } else {
3958 btrfs_tree_unlock(left);
3959 free_extent_buffer(left);
3960 path->slots[0] -= push_items;
3962 BUG_ON(path->slots[0] < 0);
3963 return ret;
3964 out:
3965 btrfs_tree_unlock(left);
3966 free_extent_buffer(left);
3967 return ret;
3971 * push some data in the path leaf to the left, trying to free up at
3972 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3974 * max_slot can put a limit on how far into the leaf we'll push items. The
3975 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3976 * items
3978 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3979 *root, struct btrfs_path *path, int min_data_size,
3980 int data_size, int empty, u32 max_slot)
3982 struct btrfs_fs_info *fs_info = root->fs_info;
3983 struct extent_buffer *right = path->nodes[0];
3984 struct extent_buffer *left;
3985 int slot;
3986 int free_space;
3987 u32 right_nritems;
3988 int ret = 0;
3990 slot = path->slots[1];
3991 if (slot == 0)
3992 return 1;
3993 if (!path->nodes[1])
3994 return 1;
3996 right_nritems = btrfs_header_nritems(right);
3997 if (right_nritems == 0)
3998 return 1;
4000 btrfs_assert_tree_locked(path->nodes[1]);
4002 left = read_node_slot(fs_info, path->nodes[1], slot - 1);
4004 * If slot - 1 is not valid or we fail to read the left node,
4005 * no big deal, just return.
4007 if (IS_ERR(left))
4008 return 1;
4010 btrfs_tree_lock(left);
4011 btrfs_set_lock_blocking(left);
4013 free_space = btrfs_leaf_free_space(fs_info, left);
4014 if (free_space < data_size) {
4015 ret = 1;
4016 goto out;
4019 /* cow and double check */
4020 ret = btrfs_cow_block(trans, root, left,
4021 path->nodes[1], slot - 1, &left);
4022 if (ret) {
4023 /* we hit -ENOSPC, but it isn't fatal here */
4024 if (ret == -ENOSPC)
4025 ret = 1;
4026 goto out;
4029 free_space = btrfs_leaf_free_space(fs_info, left);
4030 if (free_space < data_size) {
4031 ret = 1;
4032 goto out;
4035 return __push_leaf_left(fs_info, path, min_data_size,
4036 empty, left, free_space, right_nritems,
4037 max_slot);
4038 out:
4039 btrfs_tree_unlock(left);
4040 free_extent_buffer(left);
4041 return ret;
4045 * split the path's leaf in two, making sure there is at least data_size
4046 * available for the resulting leaf level of the path.
4048 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4049 struct btrfs_fs_info *fs_info,
4050 struct btrfs_path *path,
4051 struct extent_buffer *l,
4052 struct extent_buffer *right,
4053 int slot, int mid, int nritems)
4055 int data_copy_size;
4056 int rt_data_off;
4057 int i;
4058 struct btrfs_disk_key disk_key;
4059 struct btrfs_map_token token;
4061 btrfs_init_map_token(&token);
4063 nritems = nritems - mid;
4064 btrfs_set_header_nritems(right, nritems);
4065 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4067 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4068 btrfs_item_nr_offset(mid),
4069 nritems * sizeof(struct btrfs_item));
4071 copy_extent_buffer(right, l,
4072 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4073 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4074 leaf_data_end(fs_info, l), data_copy_size);
4076 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4078 for (i = 0; i < nritems; i++) {
4079 struct btrfs_item *item = btrfs_item_nr(i);
4080 u32 ioff;
4082 ioff = btrfs_token_item_offset(right, item, &token);
4083 btrfs_set_token_item_offset(right, item,
4084 ioff + rt_data_off, &token);
4087 btrfs_set_header_nritems(l, mid);
4088 btrfs_item_key(right, &disk_key, 0);
4089 insert_ptr(trans, fs_info, path, &disk_key, right->start,
4090 path->slots[1] + 1, 1);
4092 btrfs_mark_buffer_dirty(right);
4093 btrfs_mark_buffer_dirty(l);
4094 BUG_ON(path->slots[0] != slot);
4096 if (mid <= slot) {
4097 btrfs_tree_unlock(path->nodes[0]);
4098 free_extent_buffer(path->nodes[0]);
4099 path->nodes[0] = right;
4100 path->slots[0] -= mid;
4101 path->slots[1] += 1;
4102 } else {
4103 btrfs_tree_unlock(right);
4104 free_extent_buffer(right);
4107 BUG_ON(path->slots[0] < 0);
4111 * double splits happen when we need to insert a big item in the middle
4112 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4113 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4114 *              A                 B                 C
4116 * We avoid this by trying to push the items on either side of our target
4117 * into the adjacent leaves. If all goes well we can avoid the double split
4118 * completely.
4120 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4121 struct btrfs_root *root,
4122 struct btrfs_path *path,
4123 int data_size)
4125 struct btrfs_fs_info *fs_info = root->fs_info;
4126 int ret;
4127 int progress = 0;
4128 int slot;
4129 u32 nritems;
4130 int space_needed = data_size;
4132 slot = path->slots[0];
4133 if (slot < btrfs_header_nritems(path->nodes[0]))
4134 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4137 * try to push all the items after our slot into the
4138 * right leaf
4140 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4141 if (ret < 0)
4142 return ret;
4144 if (ret == 0)
4145 progress++;
4147 nritems = btrfs_header_nritems(path->nodes[0]);
4149 * our goal is to get our slot at the start or end of a leaf. If
4150 * we've done so we're done
4152 if (path->slots[0] == 0 || path->slots[0] == nritems)
4153 return 0;
4155 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4156 return 0;
4158 /* try to push all the items before our slot into the next leaf */
4159 slot = path->slots[0];
4160 space_needed = data_size;
4161 if (slot > 0)
4162 space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4163 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4164 if (ret < 0)
4165 return ret;
4167 if (ret == 0)
4168 progress++;
4170 if (progress)
4171 return 0;
4172 return 1;
4176 * split the path's leaf in two, making sure there is at least data_size
4177 * available for the resulting leaf level of the path.
4179 * returns 0 if all went well and < 0 on failure.
4181 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4182 struct btrfs_root *root,
4183 const struct btrfs_key *ins_key,
4184 struct btrfs_path *path, int data_size,
4185 int extend)
4187 struct btrfs_disk_key disk_key;
4188 struct extent_buffer *l;
4189 u32 nritems;
4190 int mid;
4191 int slot;
4192 struct extent_buffer *right;
4193 struct btrfs_fs_info *fs_info = root->fs_info;
4194 int ret = 0;
4195 int wret;
4196 int split;
4197 int num_doubles = 0;
4198 int tried_avoid_double = 0;
4200 l = path->nodes[0];
4201 slot = path->slots[0];
4202 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4203 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4204 return -EOVERFLOW;
4206 /* first try to make some room by pushing left and right */
4207 if (data_size && path->nodes[1]) {
4208 int space_needed = data_size;
4210 if (slot < btrfs_header_nritems(l))
4211 space_needed -= btrfs_leaf_free_space(fs_info, l);
4213 wret = push_leaf_right(trans, root, path, space_needed,
4214 space_needed, 0, 0);
4215 if (wret < 0)
4216 return wret;
4217 if (wret) {
4218 space_needed = data_size;
4219 if (slot > 0)
4220 space_needed -= btrfs_leaf_free_space(fs_info, l);
4222 wret = push_leaf_left(trans, root, path, space_needed,
4223 space_needed, 0, (u32)-1);
4224 if (wret < 0)
4225 return wret;
4227 l = path->nodes[0];
4229 /* did the pushes work? */
4230 if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4231 return 0;
4234 if (!path->nodes[1]) {
4235 ret = insert_new_root(trans, root, path, 1);
4236 if (ret)
4237 return ret;
4239 again:
4240 split = 1;
4241 l = path->nodes[0];
4242 slot = path->slots[0];
4243 nritems = btrfs_header_nritems(l);
4244 mid = (nritems + 1) / 2;
4246 if (mid <= slot) {
4247 if (nritems == 1 ||
4248 leaf_space_used(l, mid, nritems - mid) + data_size >
4249 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4250 if (slot >= nritems) {
4251 split = 0;
4252 } else {
4253 mid = slot;
4254 if (mid != nritems &&
4255 leaf_space_used(l, mid, nritems - mid) +
4256 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4257 if (data_size && !tried_avoid_double)
4258 goto push_for_double;
4259 split = 2;
4263 } else {
4264 if (leaf_space_used(l, 0, mid) + data_size >
4265 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4266 if (!extend && data_size && slot == 0) {
4267 split = 0;
4268 } else if ((extend || !data_size) && slot == 0) {
4269 mid = 1;
4270 } else {
4271 mid = slot;
4272 if (mid != nritems &&
4273 leaf_space_used(l, mid, nritems - mid) +
4274 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4275 if (data_size && !tried_avoid_double)
4276 goto push_for_double;
4277 split = 2;
4283 if (split == 0)
4284 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4285 else
4286 btrfs_item_key(l, &disk_key, mid);
4288 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4289 &disk_key, 0, l->start, 0);
4290 if (IS_ERR(right))
4291 return PTR_ERR(right);
4293 root_add_used(root, fs_info->nodesize);
4295 memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4296 btrfs_set_header_bytenr(right, right->start);
4297 btrfs_set_header_generation(right, trans->transid);
4298 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4299 btrfs_set_header_owner(right, root->root_key.objectid);
4300 btrfs_set_header_level(right, 0);
4301 write_extent_buffer_fsid(right, fs_info->fsid);
4302 write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
4304 if (split == 0) {
4305 if (mid <= slot) {
4306 btrfs_set_header_nritems(right, 0);
4307 insert_ptr(trans, fs_info, path, &disk_key,
4308 right->start, path->slots[1] + 1, 1);
4309 btrfs_tree_unlock(path->nodes[0]);
4310 free_extent_buffer(path->nodes[0]);
4311 path->nodes[0] = right;
4312 path->slots[0] = 0;
4313 path->slots[1] += 1;
4314 } else {
4315 btrfs_set_header_nritems(right, 0);
4316 insert_ptr(trans, fs_info, path, &disk_key,
4317 right->start, path->slots[1], 1);
4318 btrfs_tree_unlock(path->nodes[0]);
4319 free_extent_buffer(path->nodes[0]);
4320 path->nodes[0] = right;
4321 path->slots[0] = 0;
4322 if (path->slots[1] == 0)
4323 fixup_low_keys(fs_info, path, &disk_key, 1);
4326 * We create a new leaf 'right' for the required ins_len and
4327 * we'll mark it dirty with btrfs_mark_buffer_dirty() once the
4328 * caller has copied ins_len bytes of content into it.
4330 return ret;
4333 copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4335 if (split == 2) {
4336 BUG_ON(num_doubles != 0);
4337 num_doubles++;
4338 goto again;
4341 return 0;
4343 push_for_double:
4344 push_for_double_split(trans, root, path, data_size);
4345 tried_avoid_double = 1;
4346 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4347 return 0;
4348 goto again;
4351 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4352 struct btrfs_root *root,
4353 struct btrfs_path *path, int ins_len)
4355 struct btrfs_fs_info *fs_info = root->fs_info;
4356 struct btrfs_key key;
4357 struct extent_buffer *leaf;
4358 struct btrfs_file_extent_item *fi;
4359 u64 extent_len = 0;
4360 u32 item_size;
4361 int ret;
4363 leaf = path->nodes[0];
4364 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4366 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4367 key.type != BTRFS_EXTENT_CSUM_KEY);
4369 if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4370 return 0;
4372 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4373 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4374 fi = btrfs_item_ptr(leaf, path->slots[0],
4375 struct btrfs_file_extent_item);
4376 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4378 btrfs_release_path(path);
4380 path->keep_locks = 1;
4381 path->search_for_split = 1;
4382 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4383 path->search_for_split = 0;
4384 if (ret > 0)
4385 ret = -EAGAIN;
4386 if (ret < 0)
4387 goto err;
4389 ret = -EAGAIN;
4390 leaf = path->nodes[0];
4391 /* if our item isn't there, return now */
4392 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4393 goto err;
4395 /* the leaf has changed, it now has room. return now */
4396 if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4397 goto err;
4399 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4400 fi = btrfs_item_ptr(leaf, path->slots[0],
4401 struct btrfs_file_extent_item);
4402 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4403 goto err;
4406 btrfs_set_path_blocking(path);
4407 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4408 if (ret)
4409 goto err;
4411 path->keep_locks = 0;
4412 btrfs_unlock_up_safe(path, 1);
4413 return 0;
4414 err:
4415 path->keep_locks = 0;
4416 return ret;
4419 static noinline int split_item(struct btrfs_fs_info *fs_info,
4420 struct btrfs_path *path,
4421 const struct btrfs_key *new_key,
4422 unsigned long split_offset)
4424 struct extent_buffer *leaf;
4425 struct btrfs_item *item;
4426 struct btrfs_item *new_item;
4427 int slot;
4428 char *buf;
4429 u32 nritems;
4430 u32 item_size;
4431 u32 orig_offset;
4432 struct btrfs_disk_key disk_key;
4434 leaf = path->nodes[0];
4435 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4437 btrfs_set_path_blocking(path);
4439 item = btrfs_item_nr(path->slots[0]);
4440 orig_offset = btrfs_item_offset(leaf, item);
4441 item_size = btrfs_item_size(leaf, item);
4443 buf = kmalloc(item_size, GFP_NOFS);
4444 if (!buf)
4445 return -ENOMEM;
4447 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4448 path->slots[0]), item_size);
4450 slot = path->slots[0] + 1;
4451 nritems = btrfs_header_nritems(leaf);
4452 if (slot != nritems) {
4453 /* shift the items */
4454 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4455 btrfs_item_nr_offset(slot),
4456 (nritems - slot) * sizeof(struct btrfs_item));
4459 btrfs_cpu_key_to_disk(&disk_key, new_key);
4460 btrfs_set_item_key(leaf, &disk_key, slot);
4462 new_item = btrfs_item_nr(slot);
4464 btrfs_set_item_offset(leaf, new_item, orig_offset);
4465 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4467 btrfs_set_item_offset(leaf, item,
4468 orig_offset + item_size - split_offset);
4469 btrfs_set_item_size(leaf, item, split_offset);
4471 btrfs_set_header_nritems(leaf, nritems + 1);
4473 /* write the data for the start of the original item */
4474 write_extent_buffer(leaf, buf,
4475 btrfs_item_ptr_offset(leaf, path->slots[0]),
4476 split_offset);
4478 /* write the data for the new item */
4479 write_extent_buffer(leaf, buf + split_offset,
4480 btrfs_item_ptr_offset(leaf, slot),
4481 item_size - split_offset);
4482 btrfs_mark_buffer_dirty(leaf);
4484 BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4485 kfree(buf);
4486 return 0;
4490 * This function splits a single item into two items,
4491 * giving 'new_key' to the new item and splitting the
4492 * old one at split_offset (from the start of the item).
4494 * The path may be released by this operation. After
4495 * the split, the path is pointing to the old item. The
4496 * new item is going to be in the same node as the old one.
4498 * Note, the item being split must be small enough to live alone on
4499 * a tree block with room for one extra struct btrfs_item.
4501 * This allows us to split the item in place, keeping a lock on the
4502 * leaf the entire time.
4504 int btrfs_split_item(struct btrfs_trans_handle *trans,
4505 struct btrfs_root *root,
4506 struct btrfs_path *path,
4507 const struct btrfs_key *new_key,
4508 unsigned long split_offset)
4510 int ret;
4511 ret = setup_leaf_for_split(trans, root, path,
4512 sizeof(struct btrfs_item));
4513 if (ret)
4514 return ret;
4516 ret = split_item(root->fs_info, path, new_key, split_offset);
4517 return ret;
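/*
 * Illustrative sketch, not part of the original file: how a caller might
 * split the item a path points at. setup_leaf_for_split() only accepts
 * EXTENT_DATA and EXTENT_CSUM keys, and the new_offset/split_offset values
 * here are hypothetical; real callers derive them from the item contents.
 */
static int __maybe_unused example_split_at(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 new_offset,
					   unsigned long split_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	/* the second half of the item is filed under the new key */
	key.offset = new_offset;
	return btrfs_split_item(trans, root, path, &key, split_offset);
}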
4521 * This function duplicates an item, giving 'new_key' to the new item.
4522 * It guarantees both items live in the same tree leaf and the new item
4523 * is contiguous with the original item.
4525 * This allows us to split a file extent in place, keeping a lock on the
4526 * leaf the entire time.
4528 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4529 struct btrfs_root *root,
4530 struct btrfs_path *path,
4531 const struct btrfs_key *new_key)
4533 struct extent_buffer *leaf;
4534 int ret;
4535 u32 item_size;
4537 leaf = path->nodes[0];
4538 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4539 ret = setup_leaf_for_split(trans, root, path,
4540 item_size + sizeof(struct btrfs_item));
4541 if (ret)
4542 return ret;
4544 path->slots[0]++;
4545 setup_items_for_insert(root, path, new_key, &item_size,
4546 item_size, item_size +
4547 sizeof(struct btrfs_item), 1);
4548 leaf = path->nodes[0];
4549 memcpy_extent_buffer(leaf,
4550 btrfs_item_ptr_offset(leaf, path->slots[0]),
4551 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4552 item_size);
4553 return 0;
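/*
 * Illustrative sketch, not part of the original file: duplicate the file
 * extent item a path points at under a hypothetical new key offset, the
 * kind of thing done when a write splits an existing extent. On success
 * the path points at the new copy.
 */
static int __maybe_unused example_dup_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     u64 new_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset = new_offset;
	return btrfs_duplicate_item(trans, root, path, &key);
}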
4557 * make the item pointed to by the path smaller. new_size indicates
4558 * how small to make it, and from_end tells us if we just chop bytes
4559 * off the end of the item or if we shift the item to chop bytes off
4560 * the front.
4562 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4563 struct btrfs_path *path, u32 new_size, int from_end)
4565 int slot;
4566 struct extent_buffer *leaf;
4567 struct btrfs_item *item;
4568 u32 nritems;
4569 unsigned int data_end;
4570 unsigned int old_data_start;
4571 unsigned int old_size;
4572 unsigned int size_diff;
4573 int i;
4574 struct btrfs_map_token token;
4576 btrfs_init_map_token(&token);
4578 leaf = path->nodes[0];
4579 slot = path->slots[0];
4581 old_size = btrfs_item_size_nr(leaf, slot);
4582 if (old_size == new_size)
4583 return;
4585 nritems = btrfs_header_nritems(leaf);
4586 data_end = leaf_data_end(fs_info, leaf);
4588 old_data_start = btrfs_item_offset_nr(leaf, slot);
4590 size_diff = old_size - new_size;
4592 BUG_ON(slot < 0);
4593 BUG_ON(slot >= nritems);
4596 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4598 /* first correct the data pointers */
4599 for (i = slot; i < nritems; i++) {
4600 u32 ioff;
4601 item = btrfs_item_nr(i);
4603 ioff = btrfs_token_item_offset(leaf, item, &token);
4604 btrfs_set_token_item_offset(leaf, item,
4605 ioff + size_diff, &token);
4608 /* shift the data */
4609 if (from_end) {
4610 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4611 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4612 data_end, old_data_start + new_size - data_end);
4613 } else {
4614 struct btrfs_disk_key disk_key;
4615 u64 offset;
4617 btrfs_item_key(leaf, &disk_key, slot);
4619 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4620 unsigned long ptr;
4621 struct btrfs_file_extent_item *fi;
4623 fi = btrfs_item_ptr(leaf, slot,
4624 struct btrfs_file_extent_item);
4625 fi = (struct btrfs_file_extent_item *)(
4626 (unsigned long)fi - size_diff);
4628 if (btrfs_file_extent_type(leaf, fi) ==
4629 BTRFS_FILE_EXTENT_INLINE) {
4630 ptr = btrfs_item_ptr_offset(leaf, slot);
4631 memmove_extent_buffer(leaf, ptr,
4632 (unsigned long)fi,
4633 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4637 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4638 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4639 data_end, old_data_start - data_end);
4641 offset = btrfs_disk_key_offset(&disk_key);
4642 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4643 btrfs_set_item_key(leaf, &disk_key, slot);
4644 if (slot == 0)
4645 fixup_low_keys(fs_info, path, &disk_key, 1);
4648 item = btrfs_item_nr(slot);
4649 btrfs_set_item_size(leaf, item, new_size);
4650 btrfs_mark_buffer_dirty(leaf);
4652 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4653 btrfs_print_leaf(leaf);
4654 BUG();
4659 * make the item pointed to by the path bigger, data_size is the added size.
4661 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4662 u32 data_size)
4664 int slot;
4665 struct extent_buffer *leaf;
4666 struct btrfs_item *item;
4667 u32 nritems;
4668 unsigned int data_end;
4669 unsigned int old_data;
4670 unsigned int old_size;
4671 int i;
4672 struct btrfs_map_token token;
4674 btrfs_init_map_token(&token);
4676 leaf = path->nodes[0];
4678 nritems = btrfs_header_nritems(leaf);
4679 data_end = leaf_data_end(fs_info, leaf);
4681 if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4682 btrfs_print_leaf(leaf);
4683 BUG();
4685 slot = path->slots[0];
4686 old_data = btrfs_item_end_nr(leaf, slot);
4688 BUG_ON(slot < 0);
4689 if (slot >= nritems) {
4690 btrfs_print_leaf(leaf);
4691 btrfs_crit(fs_info, "slot %d too large, nritems %d",
4692 slot, nritems);
4693 BUG_ON(1);
4697 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4699 /* first correct the data pointers */
4700 for (i = slot; i < nritems; i++) {
4701 u32 ioff;
4702 item = btrfs_item_nr(i);
4704 ioff = btrfs_token_item_offset(leaf, item, &token);
4705 btrfs_set_token_item_offset(leaf, item,
4706 ioff - data_size, &token);
4709 /* shift the data */
4710 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4711 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4712 data_end, old_data - data_end);
4714 data_end = old_data;
4715 old_size = btrfs_item_size_nr(leaf, slot);
4716 item = btrfs_item_nr(slot);
4717 btrfs_set_item_size(leaf, item, old_size + data_size);
4718 btrfs_mark_buffer_dirty(leaf);
4720 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4721 btrfs_print_leaf(leaf);
4722 BUG();
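/*
 * Illustrative sketch, not part of the original file: grow the item under
 * the path by 'extra' bytes and write new data into the tail. The item
 * data pointer must be re-read after btrfs_extend_item() because the
 * item's data offset moves down to make room.
 */
static void __maybe_unused example_grow_item(struct btrfs_fs_info *fs_info,
					     struct btrfs_path *path,
					     const void *data, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(fs_info, path, extra);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, extra);
	btrfs_mark_buffer_dirty(leaf);
}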
4727 * this is a helper for btrfs_insert_empty_items, the main goal here is
4728 * to save stack depth by doing the bulk of the work in a function
4729 * that doesn't call btrfs_search_slot
4731 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4732 const struct btrfs_key *cpu_key, u32 *data_size,
4733 u32 total_data, u32 total_size, int nr)
4735 struct btrfs_fs_info *fs_info = root->fs_info;
4736 struct btrfs_item *item;
4737 int i;
4738 u32 nritems;
4739 unsigned int data_end;
4740 struct btrfs_disk_key disk_key;
4741 struct extent_buffer *leaf;
4742 int slot;
4743 struct btrfs_map_token token;
4745 if (path->slots[0] == 0) {
4746 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4747 fixup_low_keys(fs_info, path, &disk_key, 1);
4749 btrfs_unlock_up_safe(path, 1);
4751 btrfs_init_map_token(&token);
4753 leaf = path->nodes[0];
4754 slot = path->slots[0];
4756 nritems = btrfs_header_nritems(leaf);
4757 data_end = leaf_data_end(fs_info, leaf);
4759 if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4760 btrfs_print_leaf(leaf);
4761 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4762 total_size, btrfs_leaf_free_space(fs_info, leaf));
4763 BUG();
4766 if (slot != nritems) {
4767 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4769 if (old_data < data_end) {
4770 btrfs_print_leaf(leaf);
4771 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4772 slot, old_data, data_end);
4773 BUG_ON(1);
4776 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4778 /* first correct the data pointers */
4779 for (i = slot; i < nritems; i++) {
4780 u32 ioff;
4782 item = btrfs_item_nr(i);
4783 ioff = btrfs_token_item_offset(leaf, item, &token);
4784 btrfs_set_token_item_offset(leaf, item,
4785 ioff - total_data, &token);
4787 /* shift the items */
4788 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4789 btrfs_item_nr_offset(slot),
4790 (nritems - slot) * sizeof(struct btrfs_item));
4792 /* shift the data */
4793 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4794 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4795 data_end, old_data - data_end);
4796 data_end = old_data;
4799 /* setup the item for the new data */
4800 for (i = 0; i < nr; i++) {
4801 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4802 btrfs_set_item_key(leaf, &disk_key, slot + i);
4803 item = btrfs_item_nr(slot + i);
4804 btrfs_set_token_item_offset(leaf, item,
4805 data_end - data_size[i], &token);
4806 data_end -= data_size[i];
4807 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4810 btrfs_set_header_nritems(leaf, nritems + nr);
4811 btrfs_mark_buffer_dirty(leaf);
4813 if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4814 btrfs_print_leaf(leaf);
4815 BUG();
4820 * Given a key and some data, insert items into the tree.
4821 * This does all the path init required, making room in the tree if needed.
4823 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4824 struct btrfs_root *root,
4825 struct btrfs_path *path,
4826 const struct btrfs_key *cpu_key, u32 *data_size,
4827 int nr)
4829 int ret = 0;
4830 int slot;
4831 int i;
4832 u32 total_size = 0;
4833 u32 total_data = 0;
4835 for (i = 0; i < nr; i++)
4836 total_data += data_size[i];
4838 total_size = total_data + (nr * sizeof(struct btrfs_item));
4839 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4840 if (ret == 0)
4841 return -EEXIST;
4842 if (ret < 0)
4843 return ret;
4845 slot = path->slots[0];
4846 BUG_ON(slot < 0);
4848 setup_items_for_insert(root, path, cpu_key, data_size,
4849 total_data, total_size, nr);
4850 return 0;
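/*
 * Illustrative sketch, not part of the original file: reserve room for two
 * adjacent empty items with a single tree search. The keys must be passed
 * in sorted order and the sizes here are hypothetical; the caller fills in
 * the item data and releases the path afterwards.
 */
static int __maybe_unused example_insert_two(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     const struct btrfs_key *keys)
{
	u32 sizes[2] = { 16, 32 };	/* hypothetical item body sizes */

	return btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
}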
4854 * Given a key and some data, insert an item into the tree.
4855 * This does all the path init required, making room in the tree if needed.
4857 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4858 const struct btrfs_key *cpu_key, void *data,
4859 u32 data_size)
4861 int ret = 0;
4862 struct btrfs_path *path;
4863 struct extent_buffer *leaf;
4864 unsigned long ptr;
4866 path = btrfs_alloc_path();
4867 if (!path)
4868 return -ENOMEM;
4869 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4870 if (!ret) {
4871 leaf = path->nodes[0];
4872 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4873 write_extent_buffer(leaf, data, ptr, data_size);
4874 btrfs_mark_buffer_dirty(leaf);
4876 btrfs_free_path(path);
4877 return ret;
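/*
 * Illustrative sketch, not part of the original file: insert one small item
 * under a hypothetical key. A real caller would store an on-disk (le64)
 * value rather than a raw u64; -EEXIST means the key is already present.
 */
static int __maybe_unused example_insert_u64(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 objectid, u8 type, u64 offset,
					     u64 value)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;
	return btrfs_insert_item(trans, root, &key, &value, sizeof(value));
}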
4881 * delete the pointer from a given node.
4883 * the tree should have been previously balanced so the deletion does not
4884 * empty a node.
4886 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4887 int level, int slot)
4889 struct btrfs_fs_info *fs_info = root->fs_info;
4890 struct extent_buffer *parent = path->nodes[level];
4891 u32 nritems;
4892 int ret;
4894 nritems = btrfs_header_nritems(parent);
4895 if (slot != nritems - 1) {
4896 if (level) {
4897 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4898 nritems - slot - 1);
4899 BUG_ON(ret < 0);
4901 memmove_extent_buffer(parent,
4902 btrfs_node_key_ptr_offset(slot),
4903 btrfs_node_key_ptr_offset(slot + 1),
4904 sizeof(struct btrfs_key_ptr) *
4905 (nritems - slot - 1));
4906 } else if (level) {
4907 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4908 GFP_NOFS);
4909 BUG_ON(ret < 0);
4912 nritems--;
4913 btrfs_set_header_nritems(parent, nritems);
4914 if (nritems == 0 && parent == root->node) {
4915 BUG_ON(btrfs_header_level(root->node) != 1);
4916 /* just turn the root into a leaf and break */
4917 btrfs_set_header_level(root->node, 0);
4918 } else if (slot == 0) {
4919 struct btrfs_disk_key disk_key;
4921 btrfs_node_key(parent, &disk_key, 0);
4922 fixup_low_keys(fs_info, path, &disk_key, level + 1);
4924 btrfs_mark_buffer_dirty(parent);
4928 * a helper function to delete the leaf pointed to by path->slots[1] and
4929 * path->nodes[1].
4931 * This deletes the pointer in path->nodes[1] and frees the leaf
4932 * block extent. zero is returned if it all worked out, < 0 otherwise.
4934 * The path must have already been setup for deleting the leaf, including
4935 * all the proper balancing. path->nodes[1] must be locked.
4937 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4938 struct btrfs_root *root,
4939 struct btrfs_path *path,
4940 struct extent_buffer *leaf)
4942 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4943 del_ptr(root, path, 1, path->slots[1]);
4946 * btrfs_free_extent is expensive, we want to make sure we
4947 * aren't holding any locks when we call it
4949 btrfs_unlock_up_safe(path, 0);
4951 root_sub_used(root, leaf->len);
4953 extent_buffer_get(leaf);
4954 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4955 free_extent_buffer_stale(leaf);
4958 * delete the items at the leaf level in path. If that empties
4959 * the leaf, remove it from the tree
4961 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4962 struct btrfs_path *path, int slot, int nr)
4964 struct btrfs_fs_info *fs_info = root->fs_info;
4965 struct extent_buffer *leaf;
4966 struct btrfs_item *item;
4967 u32 last_off;
4968 u32 dsize = 0;
4969 int ret = 0;
4970 int wret;
4971 int i;
4972 u32 nritems;
4973 struct btrfs_map_token token;
4975 btrfs_init_map_token(&token);
4977 leaf = path->nodes[0];
4978 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4980 for (i = 0; i < nr; i++)
4981 dsize += btrfs_item_size_nr(leaf, slot + i);
4983 nritems = btrfs_header_nritems(leaf);
4985 if (slot + nr != nritems) {
4986 int data_end = leaf_data_end(fs_info, leaf);
4988 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4989 data_end + dsize,
4990 BTRFS_LEAF_DATA_OFFSET + data_end,
4991 last_off - data_end);
4993 for (i = slot + nr; i < nritems; i++) {
4994 u32 ioff;
4996 item = btrfs_item_nr(i);
4997 ioff = btrfs_token_item_offset(leaf, item, &token);
4998 btrfs_set_token_item_offset(leaf, item,
4999 ioff + dsize, &token);
5002 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
5003 btrfs_item_nr_offset(slot + nr),
5004 sizeof(struct btrfs_item) *
5005 (nritems - slot - nr));
5007 btrfs_set_header_nritems(leaf, nritems - nr);
5008 nritems -= nr;
5010 /* delete the leaf if we've emptied it */
5011 if (nritems == 0) {
5012 if (leaf == root->node) {
5013 btrfs_set_header_level(leaf, 0);
5014 } else {
5015 btrfs_set_path_blocking(path);
5016 clean_tree_block(fs_info, leaf);
5017 btrfs_del_leaf(trans, root, path, leaf);
5019 } else {
5020 int used = leaf_space_used(leaf, 0, nritems);
5021 if (slot == 0) {
5022 struct btrfs_disk_key disk_key;
5024 btrfs_item_key(leaf, &disk_key, 0);
5025 fixup_low_keys(fs_info, path, &disk_key, 1);
5028 /* delete the leaf if it is mostly empty */
5029 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5030 /* push_leaf_left fixes the path.
5031 * make sure the path still points to our leaf
5032 * for possible call to del_ptr below
5034 slot = path->slots[1];
5035 extent_buffer_get(leaf);
5037 btrfs_set_path_blocking(path);
5038 wret = push_leaf_left(trans, root, path, 1, 1,
5039 1, (u32)-1);
5040 if (wret < 0 && wret != -ENOSPC)
5041 ret = wret;
5043 if (path->nodes[0] == leaf &&
5044 btrfs_header_nritems(leaf)) {
5045 wret = push_leaf_right(trans, root, path, 1,
5046 1, 1, 0);
5047 if (wret < 0 && wret != -ENOSPC)
5048 ret = wret;
5051 if (btrfs_header_nritems(leaf) == 0) {
5052 path->slots[1] = slot;
5053 btrfs_del_leaf(trans, root, path, leaf);
5054 free_extent_buffer(leaf);
5055 ret = 0;
5056 } else {
5057 /* if we're still in the path, make sure
5058 * we're dirty. Otherwise, one of the
5059 * push_leaf functions must have already
5060 * dirtied this buffer
5062 if (path->nodes[0] == leaf)
5063 btrfs_mark_buffer_dirty(leaf);
5064 free_extent_buffer(leaf);
5066 } else {
5067 btrfs_mark_buffer_dirty(leaf);
5070 return ret;
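/*
 * Illustrative sketch, not part of the original file: look a key up and
 * delete it if present. Searching with ins_len == -1 and cow == 1 tells
 * btrfs_search_slot() to prepare the path for a deletion; btrfs_del_item()
 * in ctree.h is the single-item wrapper around btrfs_del_items().
 */
static int __maybe_unused example_delete_key(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;
	btrfs_free_path(path);
	return ret;
}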
5074 * search the tree again to find a leaf with lesser keys
5075 * returns 0 if it found something or 1 if there are no lesser leaves.
5076 * returns < 0 on io errors.
5078 * This may release the path, and so you may lose any locks held at the
5079 * time you call it.
5081 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5083 struct btrfs_key key;
5084 struct btrfs_disk_key found_key;
5085 int ret;
5087 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5089 if (key.offset > 0) {
5090 key.offset--;
5091 } else if (key.type > 0) {
5092 key.type--;
5093 key.offset = (u64)-1;
5094 } else if (key.objectid > 0) {
5095 key.objectid--;
5096 key.type = (u8)-1;
5097 key.offset = (u64)-1;
5098 } else {
5099 return 1;
5102 btrfs_release_path(path);
5103 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5104 if (ret < 0)
5105 return ret;
5106 btrfs_item_key(path->nodes[0], &found_key, 0);
5107 ret = comp_keys(&found_key, &key);
5109 * We might have had an item with the previous key in the tree right
5110 * before we released our path. And after we released our path, that
5111 * item might have been pushed to the first slot (0) of the leaf we
5112 * were holding due to a tree balance. Alternatively, an item with the
5113 * previous key can exist as the only element of a leaf (big fat item).
5114 * Therefore account for these 2 cases, so that our callers (like
5115 * btrfs_previous_item) don't miss an existing item with a key matching
5116 * the previous key we computed above.
5118 if (ret <= 0)
5119 return 0;
5120 return 1;
5124 * A helper function to walk down the tree starting at min_key, and looking
5125 * for nodes or leaves that have a minimum transaction id.
5126 * This is used by the btree defrag code and tree logging.
5128 * This does not cow, but it does stuff the starting key it finds back
5129 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5130 * key and get a writable path.
5132 * This honors path->lowest_level to prevent descent past a given level
5133 * of the tree.
5135 * min_trans indicates the oldest transaction that you are interested
5136 * in walking through. Any nodes or leaves older than min_trans are
5137 * skipped over (without reading them).
5139 * returns zero if something useful was found, < 0 on error and 1 if there
5140 * was nothing in the tree that matched the search criteria.
5142 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5143 struct btrfs_path *path,
5144 u64 min_trans)
5146 struct btrfs_fs_info *fs_info = root->fs_info;
5147 struct extent_buffer *cur;
5148 struct btrfs_key found_key;
5149 int slot;
5150 int sret;
5151 u32 nritems;
5152 int level;
5153 int ret = 1;
5154 int keep_locks = path->keep_locks;
5156 path->keep_locks = 1;
5157 again:
5158 cur = btrfs_read_lock_root_node(root);
5159 level = btrfs_header_level(cur);
5160 WARN_ON(path->nodes[level]);
5161 path->nodes[level] = cur;
5162 path->locks[level] = BTRFS_READ_LOCK;
5164 if (btrfs_header_generation(cur) < min_trans) {
5165 ret = 1;
5166 goto out;
5168 while (1) {
5169 nritems = btrfs_header_nritems(cur);
5170 level = btrfs_header_level(cur);
5171 sret = btrfs_bin_search(cur, min_key, level, &slot);
5173 /* at the lowest level, we're done, setup the path and exit */
5174 if (level == path->lowest_level) {
5175 if (slot >= nritems)
5176 goto find_next_key;
5177 ret = 0;
5178 path->slots[level] = slot;
5179 btrfs_item_key_to_cpu(cur, &found_key, slot);
5180 goto out;
5182 if (sret && slot > 0)
5183 slot--;
5185 * check this node pointer against the min_trans parameter.
5186 * If it is too old, skip to the next one.
5188 while (slot < nritems) {
5189 u64 gen;
5191 gen = btrfs_node_ptr_generation(cur, slot);
5192 if (gen < min_trans) {
5193 slot++;
5194 continue;
5196 break;
5198 find_next_key:
5200 * we didn't find a candidate key in this node, walk forward
5201 * and find another one
5203 if (slot >= nritems) {
5204 path->slots[level] = slot;
5205 btrfs_set_path_blocking(path);
5206 sret = btrfs_find_next_key(root, path, min_key, level,
5207 min_trans);
5208 if (sret == 0) {
5209 btrfs_release_path(path);
5210 goto again;
5211 } else {
5212 goto out;
5215 /* save our key for returning back */
5216 btrfs_node_key_to_cpu(cur, &found_key, slot);
5217 path->slots[level] = slot;
5218 if (level == path->lowest_level) {
5219 ret = 0;
5220 goto out;
5222 btrfs_set_path_blocking(path);
5223 cur = read_node_slot(fs_info, cur, slot);
5224 if (IS_ERR(cur)) {
5225 ret = PTR_ERR(cur);
5226 goto out;
5229 btrfs_tree_read_lock(cur);
5231 path->locks[level - 1] = BTRFS_READ_LOCK;
5232 path->nodes[level - 1] = cur;
5233 unlock_up(path, level, 1, 0, NULL);
5234 btrfs_clear_path_blocking(path, NULL, 0);
5236 out:
5237 path->keep_locks = keep_locks;
5238 if (ret == 0) {
5239 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5240 btrfs_set_path_blocking(path);
5241 memcpy(min_key, &found_key, sizeof(found_key));
5243 return ret;
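/*
 * Illustrative sketch, not part of the original file: visit every item in
 * subtrees modified in or after 'min_trans', restarting the walk from the
 * key after each hit. The key advance below is simplistic (offset only)
 * and purely for illustration; the path must be released before the next
 * call because btrfs_search_forward() starts again from the root.
 */
static int __maybe_unused example_scan_newer(struct btrfs_root *root,
					     u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer left */
			break;
		}
		/* ... process path->nodes[0], slot path->slots[0] ... */
		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret;
}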
5246 static int tree_move_down(struct btrfs_fs_info *fs_info,
5247 struct btrfs_path *path,
5248 int *level)
5250 struct extent_buffer *eb;
5252 BUG_ON(*level == 0);
5253 eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5254 if (IS_ERR(eb))
5255 return PTR_ERR(eb);
5257 path->nodes[*level - 1] = eb;
5258 path->slots[*level - 1] = 0;
5259 (*level)--;
5260 return 0;
5263 static int tree_move_next_or_upnext(struct btrfs_path *path,
5264 int *level, int root_level)
5266 int ret = 0;
5267 int nritems;
5268 nritems = btrfs_header_nritems(path->nodes[*level]);
5270 path->slots[*level]++;
5272 while (path->slots[*level] >= nritems) {
5273 if (*level == root_level)
5274 return -1;
5276 /* move upnext */
5277 path->slots[*level] = 0;
5278 free_extent_buffer(path->nodes[*level]);
5279 path->nodes[*level] = NULL;
5280 (*level)++;
5281 path->slots[*level]++;
5283 nritems = btrfs_header_nritems(path->nodes[*level]);
5284 ret = 1;
5286 return ret;
5290 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5291 * or down.
5293 static int tree_advance(struct btrfs_fs_info *fs_info,
5294 struct btrfs_path *path,
5295 int *level, int root_level,
5296 int allow_down,
5297 struct btrfs_key *key)
5299 int ret;
5301 if (*level == 0 || !allow_down) {
5302 ret = tree_move_next_or_upnext(path, level, root_level);
5303 } else {
5304 ret = tree_move_down(fs_info, path, level);
5306 if (ret >= 0) {
5307 if (*level == 0)
5308 btrfs_item_key_to_cpu(path->nodes[*level], key,
5309 path->slots[*level]);
5310 else
5311 btrfs_node_key_to_cpu(path->nodes[*level], key,
5312 path->slots[*level]);
5314 return ret;
5317 static int tree_compare_item(struct btrfs_path *left_path,
5318 struct btrfs_path *right_path,
5319 char *tmp_buf)
5321 int cmp;
5322 int len1, len2;
5323 unsigned long off1, off2;
5325 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5326 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5327 if (len1 != len2)
5328 return 1;
5330 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5331 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5332 right_path->slots[0]);
5334 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5336 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5337 if (cmp)
5338 return 1;
5339 return 0;
5342 #define ADVANCE 1
5343 #define ADVANCE_ONLY_NEXT -1
5346 * This function compares two trees and calls the provided callback for
5347 * every changed/new/deleted item it finds.
5348 * If shared tree blocks are encountered, whole subtrees are skipped, making
5349 * the compare pretty fast on snapshotted subvolumes.
5351 * This currently works on commit roots only. As commit roots are read only,
5352 * we don't do any locking. The commit roots are protected with transactions.
5353 * Transactions are ended and rejoined when a commit is tried in between.
5355 * This function checks for modifications done to the trees while comparing.
5356 * If it detects a change, it aborts immediately.
5358 int btrfs_compare_trees(struct btrfs_root *left_root,
5359 struct btrfs_root *right_root,
5360 btrfs_changed_cb_t changed_cb, void *ctx)
5362 struct btrfs_fs_info *fs_info = left_root->fs_info;
5363 int ret;
5364 int cmp;
5365 struct btrfs_path *left_path = NULL;
5366 struct btrfs_path *right_path = NULL;
5367 struct btrfs_key left_key;
5368 struct btrfs_key right_key;
5369 char *tmp_buf = NULL;
5370 int left_root_level;
5371 int right_root_level;
5372 int left_level;
5373 int right_level;
5374 int left_end_reached;
5375 int right_end_reached;
5376 int advance_left;
5377 int advance_right;
5378 u64 left_blockptr;
5379 u64 right_blockptr;
5380 u64 left_gen;
5381 u64 right_gen;
5383 left_path = btrfs_alloc_path();
5384 if (!left_path) {
5385 ret = -ENOMEM;
5386 goto out;
5388 right_path = btrfs_alloc_path();
5389 if (!right_path) {
5390 ret = -ENOMEM;
5391 goto out;
5394 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5395 if (!tmp_buf) {
5396 ret = -ENOMEM;
5397 goto out;
5400 left_path->search_commit_root = 1;
5401 left_path->skip_locking = 1;
5402 right_path->search_commit_root = 1;
5403 right_path->skip_locking = 1;
5406 * Strategy: Go to the first items of both trees. Then do
5408 * If both trees are at level 0
5409 * Compare keys of current items
5410 * If left < right treat left item as new, advance left tree
5411 * and repeat
5412 * If left > right treat right item as deleted, advance right tree
5413 * and repeat
5414 * If left == right do deep compare of items, treat as changed if
5415 * needed, advance both trees and repeat
5416 * If both trees are at the same level but not at level 0
5417 * Compare keys of current nodes/leaves
5418 * If left < right advance left tree and repeat
5419 * If left > right advance right tree and repeat
5420 * If left == right compare blockptrs of the next nodes/leafs
5421 * If they match advance both trees but stay at the same level
5422 * and repeat
5423 * If they don't match advance both trees while allowing to go
5424 * deeper and repeat
5425 * If tree levels are different
5426 * Advance the tree that needs it and repeat
5428 * Advancing a tree means:
5429 * If we are at level 0, try to go to the next slot. If that's not
5430 * possible, go one level up and repeat. Stop when we found a level
5431 * where we could go to the next slot. We may at this point be on a
5432 * node or a leaf.
5434 * If we are not at level 0 and not on shared tree blocks, go one
5435 * level deeper.
5437 * If we are not at level 0 and on shared tree blocks, go one slot to
5438 * the right if possible or go up and right.
5441 down_read(&fs_info->commit_root_sem);
5442 left_level = btrfs_header_level(left_root->commit_root);
5443 left_root_level = left_level;
5444 left_path->nodes[left_level] =
5445 btrfs_clone_extent_buffer(left_root->commit_root);
5446 if (!left_path->nodes[left_level]) {
5447 up_read(&fs_info->commit_root_sem);
5448 ret = -ENOMEM;
5449 goto out;
5451 extent_buffer_get(left_path->nodes[left_level]);
5453 right_level = btrfs_header_level(right_root->commit_root);
5454 right_root_level = right_level;
5455 right_path->nodes[right_level] =
5456 btrfs_clone_extent_buffer(right_root->commit_root);
5457 if (!right_path->nodes[right_level]) {
5458 up_read(&fs_info->commit_root_sem);
5459 ret = -ENOMEM;
5460 goto out;
5462 extent_buffer_get(right_path->nodes[right_level]);
5463 up_read(&fs_info->commit_root_sem);
5465 if (left_level == 0)
5466 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5467 &left_key, left_path->slots[left_level]);
5468 else
5469 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5470 &left_key, left_path->slots[left_level]);
5471 if (right_level == 0)
5472 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5473 &right_key, right_path->slots[right_level]);
5474 else
5475 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5476 &right_key, right_path->slots[right_level]);
5478 left_end_reached = right_end_reached = 0;
5479 advance_left = advance_right = 0;
5481 while (1) {
5482 if (advance_left && !left_end_reached) {
5483 ret = tree_advance(fs_info, left_path, &left_level,
5484 left_root_level,
5485 advance_left != ADVANCE_ONLY_NEXT,
5486 &left_key);
5487 if (ret == -1)
5488 left_end_reached = ADVANCE;
5489 else if (ret < 0)
5490 goto out;
5491 advance_left = 0;
5493 if (advance_right && !right_end_reached) {
5494 ret = tree_advance(fs_info, right_path, &right_level,
5495 right_root_level,
5496 advance_right != ADVANCE_ONLY_NEXT,
5497 &right_key);
5498 if (ret == -1)
5499 right_end_reached = ADVANCE;
5500 else if (ret < 0)
5501 goto out;
5502 advance_right = 0;
5505 if (left_end_reached && right_end_reached) {
5506 ret = 0;
5507 goto out;
5508 } else if (left_end_reached) {
5509 if (right_level == 0) {
5510 ret = changed_cb(left_path, right_path,
5511 &right_key,
5512 BTRFS_COMPARE_TREE_DELETED,
5513 ctx);
5514 if (ret < 0)
5515 goto out;
5517 advance_right = ADVANCE;
5518 continue;
5519 } else if (right_end_reached) {
5520 if (left_level == 0) {
5521 ret = changed_cb(left_path, right_path,
5522 &left_key,
5523 BTRFS_COMPARE_TREE_NEW,
5524 ctx);
5525 if (ret < 0)
5526 goto out;
5528 advance_left = ADVANCE;
5529 continue;
5532 if (left_level == 0 && right_level == 0) {
5533 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5534 if (cmp < 0) {
5535 ret = changed_cb(left_path, right_path,
5536 &left_key,
5537 BTRFS_COMPARE_TREE_NEW,
5538 ctx);
5539 if (ret < 0)
5540 goto out;
5541 advance_left = ADVANCE;
5542 } else if (cmp > 0) {
5543 ret = changed_cb(left_path, right_path,
5544 &right_key,
5545 BTRFS_COMPARE_TREE_DELETED,
5546 ctx);
5547 if (ret < 0)
5548 goto out;
5549 advance_right = ADVANCE;
5550 } else {
5551 enum btrfs_compare_tree_result result;
5553 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5554 ret = tree_compare_item(left_path, right_path,
5555 tmp_buf);
5556 if (ret)
5557 result = BTRFS_COMPARE_TREE_CHANGED;
5558 else
5559 result = BTRFS_COMPARE_TREE_SAME;
5560 ret = changed_cb(left_path, right_path,
5561 &left_key, result, ctx);
5562 if (ret < 0)
5563 goto out;
5564 advance_left = ADVANCE;
5565 advance_right = ADVANCE;
5567 } else if (left_level == right_level) {
5568 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5569 if (cmp < 0) {
5570 advance_left = ADVANCE;
5571 } else if (cmp > 0) {
5572 advance_right = ADVANCE;
5573 } else {
5574 left_blockptr = btrfs_node_blockptr(
5575 left_path->nodes[left_level],
5576 left_path->slots[left_level]);
5577 right_blockptr = btrfs_node_blockptr(
5578 right_path->nodes[right_level],
5579 right_path->slots[right_level]);
5580 left_gen = btrfs_node_ptr_generation(
5581 left_path->nodes[left_level],
5582 left_path->slots[left_level]);
5583 right_gen = btrfs_node_ptr_generation(
5584 right_path->nodes[right_level],
5585 right_path->slots[right_level]);
5586 if (left_blockptr == right_blockptr &&
5587 left_gen == right_gen) {
5589 * As we're on a shared block, don't
5590 * allow to go deeper.
5592 advance_left = ADVANCE_ONLY_NEXT;
5593 advance_right = ADVANCE_ONLY_NEXT;
5594 } else {
5595 advance_left = ADVANCE;
5596 advance_right = ADVANCE;
5599 } else if (left_level < right_level) {
5600 advance_right = ADVANCE;
5601 } else {
5602 advance_left = ADVANCE;
5606 out:
5607 btrfs_free_path(left_path);
5608 btrfs_free_path(right_path);
5609 kvfree(tmp_buf);
5610 return ret;
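/*
 * Illustrative sketch, not part of the original file: a minimal
 * btrfs_changed_cb_t that counts differing items, e.g. via
 * btrfs_compare_trees(left_root, right_root, example_changed_cb, &count).
 * send.c's changed_cb() is the real in-tree user.
 */
static int __maybe_unused example_changed_cb(struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	u64 *count = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*count)++;
	return 0;	/* returning < 0 aborts the compare */
}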
5614 * this is similar to btrfs_next_leaf, but does not try to preserve
5615 * and fixup the path. It looks for and returns the next key in the
5616 * tree based on the current path and the min_trans parameters.
5618 * 0 is returned if another key is found, < 0 if there are any errors
5619 * and 1 is returned if there are no higher keys in the tree
5621 * path->keep_locks should be set to 1 on the search made before
5622 * calling this function.
5624 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5625 struct btrfs_key *key, int level, u64 min_trans)
5627 int slot;
5628 struct extent_buffer *c;
5630 WARN_ON(!path->keep_locks);
5631 while (level < BTRFS_MAX_LEVEL) {
5632 if (!path->nodes[level])
5633 return 1;
5635 slot = path->slots[level] + 1;
5636 c = path->nodes[level];
5637 next:
5638 if (slot >= btrfs_header_nritems(c)) {
5639 int ret;
5640 int orig_lowest;
5641 struct btrfs_key cur_key;
5642 if (level + 1 >= BTRFS_MAX_LEVEL ||
5643 !path->nodes[level + 1])
5644 return 1;
5646 if (path->locks[level + 1]) {
5647 level++;
5648 continue;
5651 slot = btrfs_header_nritems(c) - 1;
5652 if (level == 0)
5653 btrfs_item_key_to_cpu(c, &cur_key, slot);
5654 else
5655 btrfs_node_key_to_cpu(c, &cur_key, slot);
5657 orig_lowest = path->lowest_level;
5658 btrfs_release_path(path);
5659 path->lowest_level = level;
5660 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5661 0, 0);
5662 path->lowest_level = orig_lowest;
5663 if (ret < 0)
5664 return ret;
5666 c = path->nodes[level];
5667 slot = path->slots[level];
5668 if (ret == 0)
5669 slot++;
5670 goto next;
5673 if (level == 0)
5674 btrfs_item_key_to_cpu(c, key, slot);
5675 else {
5676 u64 gen = btrfs_node_ptr_generation(c, slot);
5678 if (gen < min_trans) {
5679 slot++;
5680 goto next;
5682 btrfs_node_key_to_cpu(c, key, slot);
5684 return 0;
5686 return 1;
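/*
 * Illustrative sketch, not part of the original file: peek at the key that
 * follows the current path position without moving the path. The search
 * that produced the path must have been done with path->keep_locks set,
 * as the comment above requires.
 */
static int __maybe_unused example_peek_next_key(struct btrfs_root *root,
						struct btrfs_path *path,
						struct btrfs_key *next_key)
{
	return btrfs_find_next_key(root, path, next_key, 0, 0);
}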
5690 * search the tree again to find a leaf with greater keys
5691 * returns 0 if it found something or 1 if there are no greater leaves.
5692 * returns < 0 on io errors.
5694 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5696 return btrfs_next_old_leaf(root, path, 0);
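/*
 * Illustrative sketch, not part of the original file: the canonical
 * read-only iteration over a whole tree, refilling the path with
 * btrfs_next_leaf() whenever the current leaf is exhausted.
 */
static int __maybe_unused example_walk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;	/* 1 == no more leaves, < 0 == error */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at path->slots[0] ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}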
5699 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5700 u64 time_seq)
5702 int slot;
5703 int level;
5704 struct extent_buffer *c;
5705 struct extent_buffer *next;
5706 struct btrfs_key key;
5707 u32 nritems;
5708 int ret;
5709 int old_spinning = path->leave_spinning;
5710 int next_rw_lock = 0;
5712 nritems = btrfs_header_nritems(path->nodes[0]);
5713 if (nritems == 0)
5714 return 1;
5716 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5717 again:
5718 level = 1;
5719 next = NULL;
5720 next_rw_lock = 0;
5721 btrfs_release_path(path);
5723 path->keep_locks = 1;
5724 path->leave_spinning = 1;
5726 if (time_seq)
5727 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5728 else
5729 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5730 path->keep_locks = 0;
5732 if (ret < 0)
5733 return ret;
5735 nritems = btrfs_header_nritems(path->nodes[0]);
5737 * by releasing the path above we dropped all our locks. A balance
5738 * could have added more items next to the key that used to be
5739 * at the very end of the block. So, check again here and
5740 * advance the path if there are now more items available.
5742 if (nritems > 0 && path->slots[0] < nritems - 1) {
5743 if (ret == 0)
5744 path->slots[0]++;
5745 ret = 0;
5746 goto done;
5749 * So the above check misses one case:
5750 * - after releasing the path above, someone has removed the item that
5751 * used to be at the very end of the block, and balance between leaves
5752 * moved another item with a bigger key.offset in to replace it.
5754 * This one should be returned as well, or we can get leaf corruption
5755 * later (esp. in __btrfs_drop_extents()).
5757 * And a bit more explanation about this check:
5758 * with ret > 0 the key wasn't found, so the path points to the slot
5759 * where it would be inserted, which means the item at path->slots[0]
5760 * must be the bigger one.
5762 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5763 ret = 0;
5764 goto done;
5767 while (level < BTRFS_MAX_LEVEL) {
5768 if (!path->nodes[level]) {
5769 ret = 1;
5770 goto done;
5773 slot = path->slots[level] + 1;
5774 c = path->nodes[level];
5775 if (slot >= btrfs_header_nritems(c)) {
5776 level++;
5777 if (level == BTRFS_MAX_LEVEL) {
5778 ret = 1;
5779 goto done;
5781 continue;
5784 if (next) {
5785 btrfs_tree_unlock_rw(next, next_rw_lock);
5786 free_extent_buffer(next);
5789 next = c;
5790 next_rw_lock = path->locks[level];
5791 ret = read_block_for_search(root, path, &next, level,
5792 slot, &key);
5793 if (ret == -EAGAIN)
5794 goto again;
5796 if (ret < 0) {
5797 btrfs_release_path(path);
5798 goto done;
5801 if (!path->skip_locking) {
5802 ret = btrfs_try_tree_read_lock(next);
5803 if (!ret && time_seq) {
5805 * If we don't get the lock, we may be racing
5806 * with push_leaf_left, holding that lock while
5807 * itself waiting for the leaf we've currently
5808 * locked. To solve this situation, we give up
5809 * on our lock and cycle.
5811 free_extent_buffer(next);
5812 btrfs_release_path(path);
5813 cond_resched();
5814 goto again;
5816 if (!ret) {
5817 btrfs_set_path_blocking(path);
5818 btrfs_tree_read_lock(next);
5819 btrfs_clear_path_blocking(path, next,
5820 BTRFS_READ_LOCK);
5822 next_rw_lock = BTRFS_READ_LOCK;
5824 break;
5826 path->slots[level] = slot;
5827 while (1) {
5828 level--;
5829 c = path->nodes[level];
5830 if (path->locks[level])
5831 btrfs_tree_unlock_rw(c, path->locks[level]);
5833 free_extent_buffer(c);
5834 path->nodes[level] = next;
5835 path->slots[level] = 0;
5836 if (!path->skip_locking)
5837 path->locks[level] = next_rw_lock;
5838 if (!level)
5839 break;
5841 ret = read_block_for_search(root, path, &next, level,
5842 0, &key);
5843 if (ret == -EAGAIN)
5844 goto again;
5846 if (ret < 0) {
5847 btrfs_release_path(path);
5848 goto done;
5851 if (!path->skip_locking) {
5852 ret = btrfs_try_tree_read_lock(next);
5853 if (!ret) {
5854 btrfs_set_path_blocking(path);
5855 btrfs_tree_read_lock(next);
5856 btrfs_clear_path_blocking(path, next,
5857 BTRFS_READ_LOCK);
5859 next_rw_lock = BTRFS_READ_LOCK;
5862 ret = 0;
5863 done:
5864 unlock_up(path, 0, 1, 0, NULL);
5865 path->leave_spinning = old_spinning;
5866 if (!old_spinning)
5867 btrfs_set_path_blocking(path);
5869 return ret;
5873 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5874 * searching until it gets past min_objectid or finds an item of 'type'
5876 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5878 int btrfs_previous_item(struct btrfs_root *root,
5879 struct btrfs_path *path, u64 min_objectid,
5880 int type)
5882 struct btrfs_key found_key;
5883 struct extent_buffer *leaf;
5884 u32 nritems;
5885 int ret;
5887 while (1) {
5888 if (path->slots[0] == 0) {
5889 btrfs_set_path_blocking(path);
5890 ret = btrfs_prev_leaf(root, path);
5891 if (ret != 0)
5892 return ret;
5893 } else {
5894 path->slots[0]--;
5896 leaf = path->nodes[0];
5897 nritems = btrfs_header_nritems(leaf);
5898 if (nritems == 0)
5899 return 1;
5900 if (path->slots[0] == nritems)
5901 path->slots[0]--;
5903 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5904 if (found_key.objectid < min_objectid)
5905 break;
5906 if (found_key.type == type)
5907 return 0;
5908 if (found_key.objectid == min_objectid &&
5909 found_key.type < type)
5910 break;
5912 return 1;
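/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * finding the highest item of a given type for an objectid - search for the
 * largest possible offset, then step backwards with btrfs_previous_item().
 */
static int __maybe_unused example_find_last(struct btrfs_root *root,
					    u64 objectid, int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	key.objectid = objectid;
	key.type = (u8)type;
	key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0)
		ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0) {
		/* path now points at the highest (objectid, type) item */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	}
	btrfs_free_path(path);
	return ret;
}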
5916 * search in the extent tree to find a previous Metadata/Data extent item
5917 * with min objectid.
5919 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5921 int btrfs_previous_extent_item(struct btrfs_root *root,
5922 struct btrfs_path *path, u64 min_objectid)
5924 struct btrfs_key found_key;
5925 struct extent_buffer *leaf;
5926 u32 nritems;
5927 int ret;
5929 while (1) {
5930 if (path->slots[0] == 0) {
5931 btrfs_set_path_blocking(path);
5932 ret = btrfs_prev_leaf(root, path);
5933 if (ret != 0)
5934 return ret;
5935 } else {
5936 path->slots[0]--;
5938 leaf = path->nodes[0];
5939 nritems = btrfs_header_nritems(leaf);
5940 if (nritems == 0)
5941 return 1;
5942 if (path->slots[0] == nritems)
5943 path->slots[0]--;
5945 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5946 if (found_key.objectid < min_objectid)
5947 break;
5948 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5949 found_key.type == BTRFS_METADATA_ITEM_KEY)
5950 return 0;
5951 if (found_key.objectid == min_objectid &&
5952 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5953 break;
5955 return 1;