/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
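
/*
 * Usage sketch (illustrative, not part of the original file): a path is
 * normally allocated once, searched, read, and then freed.
 * btrfs_free_path() calls btrfs_release_path() itself, so an explicit
 * release is only needed when the same path is reused for another search:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	...examine path->nodes[0] and path->slots[0]...
 *	btrfs_release_path(path);
 *	ret = btrfs_search_slot(trans, root, &second_key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 */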
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);
	rcu_read_unlock();
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow, &last_ref);

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
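
/*
 * Worked example (illustrative, not part of the original file): with a
 * 4KiB blocksize, blocknr == 40960 and other == 65536 gives
 * other - (blocknr + blocksize) == 65536 - 45056 == 20480 < 32768, so the
 * two blocks count as close; a gap of 32KiB or more past the end of the
 * first block makes close_blocks() return 0.
 */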
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
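
/*
 * Worked example (illustrative, not part of the original file): keys sort
 * by objectid first, then type, then offset, so
 *	{257, BTRFS_EXTENT_DATA_KEY, 0} < {257, BTRFS_EXTENT_DATA_KEY, 4096}
 *	{257, BTRFS_INODE_ITEM_KEY, 0}  < {258, BTRFS_INODE_ITEM_KEY, 0}
 * and btrfs_comp_cpu_keys() returns -1, 0 or 1 accordingly, which is the
 * ordering the binary search below relies on.
 */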
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
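
/*
 * Layout sketch (illustrative, not part of the original file): a leaf keeps
 * its item headers at the front and the item data packed at the back, with
 * the free space in between:
 *
 *	[header][item 0][item 1]...[item N-1] ..free.. [data N-1]...[data 0]
 *
 * so leaf_data_end() is the offset of the lowest (most recently added) item
 * data, and inserting an item shrinks the middle gap from both sides.
 */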
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
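
/*
 * Return-value sketch (illustrative, not part of the original file):
 * searching keys [10, 20, 30] for 20 returns 0 with *slot == 1; searching
 * for 25 returns 1 with *slot == 2 (the insertion point); searching for 40
 * returns 1 with *slot == 3 == max.  Callers therefore treat a return of 1
 * as "not found, insert here".
 */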
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
	return -1;
}
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
			free_extent_buffer(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
		free_extent_buffer(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		if (btrfs_buffer_uptodate(tmp, 0)) {
			if (btrfs_buffer_uptodate(tmp, gen)) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level + 1 > write_lock_level) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
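
/*
 * Usage sketch (illustrative, not part of the original file): a read-only
 * lookup passes ins_len == 0 and cow == 0, then reads the item out of the
 * leaf at level 0 of the path:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	struct extent_buffer *leaf;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		...read the item at path->slots[0] from leaf...
 *	}
 *	btrfs_free_path(path);
 *
 * ret == 1 means the key was not found and path->slots[0] is the insertion
 * position; insertions pass ins_len > 0 and cow == 1 with a transaction
 * handle instead of NULL.
 */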
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}
2138 * worker function to insert a single pointer in a node.
2139 * the node should have enough room for the pointer already
2141 * slot and level indicate where you want the key to go, and
2142 * blocknr is the block the key points to.
2144 * returns zero on success and < 0 on any error
2146 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2147 *root, struct btrfs_path *path, struct btrfs_disk_key
2148 *key, u64 bytenr, int slot, int level)
2150 struct extent_buffer *lower;
2151 int nritems;
2153 BUG_ON(!path->nodes[level]);
2154 btrfs_assert_tree_locked(path->nodes[level]);
2155 lower = path->nodes[level];
2156 nritems = btrfs_header_nritems(lower);
2157 BUG_ON(slot > nritems);
2158 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
2159 BUG();
2160 if (slot != nritems) {
2161 memmove_extent_buffer(lower,
2162 btrfs_node_key_ptr_offset(slot + 1),
2163 btrfs_node_key_ptr_offset(slot),
2164 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2166 btrfs_set_node_key(lower, key, slot);
2167 btrfs_set_node_blockptr(lower, slot, bytenr);
2168 WARN_ON(trans->transid == 0);
2169 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2170 btrfs_set_header_nritems(lower, nritems + 1);
2171 btrfs_mark_buffer_dirty(lower);
2172 return 0;
2176 * split the node at the specified level in path in two.
2177 * The path is corrected to point to the appropriate node after the split
2179 * Before splitting, this tries to make some room in the node by pushing
2180 * left and right; if either one works, it returns right away.
2182 * returns 0 on success and < 0 on failure
2184 static noinline int split_node(struct btrfs_trans_handle *trans,
2185 struct btrfs_root *root,
2186 struct btrfs_path *path, int level)
2188 struct extent_buffer *c;
2189 struct extent_buffer *split;
2190 struct btrfs_disk_key disk_key;
2191 int mid;
2192 int ret;
2193 int wret;
2194 u32 c_nritems;
2196 c = path->nodes[level];
2197 WARN_ON(btrfs_header_generation(c) != trans->transid);
2198 if (c == root->node) {
2199 /* trying to split the root, let's make a new one */
2200 ret = insert_new_root(trans, root, path, level + 1);
2201 if (ret)
2202 return ret;
2203 } else {
2204 ret = push_nodes_for_insert(trans, root, path, level);
2205 c = path->nodes[level];
2206 if (!ret && btrfs_header_nritems(c) <
2207 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2208 return 0;
2209 if (ret < 0)
2210 return ret;
2213 c_nritems = btrfs_header_nritems(c);
2214 mid = (c_nritems + 1) / 2;
2215 btrfs_node_key(c, &disk_key, mid);
2217 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2218 root->root_key.objectid,
2219 &disk_key, level, c->start, 0, 0);
2220 if (IS_ERR(split))
2221 return PTR_ERR(split);
2223 root_add_used(root, root->nodesize);
2225 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2226 btrfs_set_header_level(split, btrfs_header_level(c));
2227 btrfs_set_header_bytenr(split, split->start);
2228 btrfs_set_header_generation(split, trans->transid);
2229 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
2230 btrfs_set_header_owner(split, root->root_key.objectid);
2231 write_extent_buffer(split, root->fs_info->fsid,
2232 (unsigned long)btrfs_header_fsid(split),
2233 BTRFS_FSID_SIZE);
2234 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2235 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2236 BTRFS_UUID_SIZE);
2239 copy_extent_buffer(split, c,
2240 btrfs_node_key_ptr_offset(0),
2241 btrfs_node_key_ptr_offset(mid),
2242 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2243 btrfs_set_header_nritems(split, c_nritems - mid);
2244 btrfs_set_header_nritems(c, mid);
2245 ret = 0;
2247 btrfs_mark_buffer_dirty(c);
2248 btrfs_mark_buffer_dirty(split);
2250 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2251 path->slots[level + 1] + 1,
2252 level + 1);
2253 if (wret)
2254 ret = wret;
2256 if (path->slots[level] >= mid) {
2257 path->slots[level] -= mid;
2258 btrfs_tree_unlock(c);
2259 free_extent_buffer(c);
2260 path->nodes[level] = split;
2261 path->slots[level + 1] += 1;
2262 } else {
2263 btrfs_tree_unlock(split);
2264 free_extent_buffer(split);
2266 return ret;
2270 * how many bytes are required to store the items in a leaf. start
2271 * and nr indicate which items in the leaf to check. This totals up the
2272 * space used both by the item structs and the item data
2274 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2276 int data_len;
2277 int nritems = btrfs_header_nritems(l);
2278 int end = min(nritems, start + nr) - 1;
2280 if (!nr)
2281 return 0;
2282 data_len = btrfs_item_end_nr(l, start);
2283 data_len = data_len - btrfs_item_offset_nr(l, end);
2284 data_len += sizeof(struct btrfs_item) * nr;
2285 WARN_ON(data_len < 0);
2286 return data_len;
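/*
 * Worked example (editorial): for a leaf holding three items with 10, 20
 * and 30 bytes of payload, leaf_space_used(l, 0, 3) returns
 * 10 + 20 + 30 + 3 * sizeof(struct btrfs_item): the payload bytes plus
 * one struct btrfs_item header per slot, which is exactly the quantity
 * btrfs_leaf_free_space() below subtracts from BTRFS_LEAF_DATA_SIZE().
 */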
2290 * The space between the end of the leaf items and
2291 * the start of the leaf data. IOW, how much room
2292 * the leaf has left for both items and data
2294 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2295 struct extent_buffer *leaf)
2297 int nritems = btrfs_header_nritems(leaf);
2298 int ret;
2299 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2300 if (ret < 0) {
2301 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2302 "used %d nritems %d\n",
2303 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2304 leaf_space_used(leaf, 0, nritems), nritems);
2306 return ret;
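/*
 * Minimal illustration (editorial): the guard a caller typically runs
 * before trying to add 'data_size' more payload bytes to a leaf,
 * mirroring the checks split_leaf() and the push helpers make below.
 * One extra struct btrfs_item is counted for the new item header.
 */
static int leaf_has_room_for(struct btrfs_root *root,
			     struct extent_buffer *leaf, int data_size)
{
	return btrfs_leaf_free_space(root, leaf) >=
	       data_size + (int)sizeof(struct btrfs_item);
}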
2310 * min slot controls the lowest index we're willing to push to the
2311 * right. We'll push up to and including min_slot, but no lower
2313 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2314 struct btrfs_root *root,
2315 struct btrfs_path *path,
2316 int data_size, int empty,
2317 struct extent_buffer *right,
2318 int free_space, u32 left_nritems,
2319 u32 min_slot)
2321 struct extent_buffer *left = path->nodes[0];
2322 struct extent_buffer *upper = path->nodes[1];
2323 struct btrfs_disk_key disk_key;
2324 int slot;
2325 u32 i;
2326 int push_space = 0;
2327 int push_items = 0;
2328 struct btrfs_item *item;
2329 u32 nr;
2330 u32 right_nritems;
2331 u32 data_end;
2332 u32 this_item_size;
2334 if (empty)
2335 nr = 0;
2336 else
2337 nr = max_t(u32, 1, min_slot);
2339 if (path->slots[0] >= left_nritems)
2340 push_space += data_size;
2342 slot = path->slots[1];
2343 i = left_nritems - 1;
2344 while (i >= nr) {
2345 item = btrfs_item_nr(left, i);
2347 if (!empty && push_items > 0) {
2348 if (path->slots[0] > i)
2349 break;
2350 if (path->slots[0] == i) {
2351 int space = btrfs_leaf_free_space(root, left);
2352 if (space + push_space * 2 > free_space)
2353 break;
2357 if (path->slots[0] == i)
2358 push_space += data_size;
2360 this_item_size = btrfs_item_size(left, item);
2361 if (this_item_size + sizeof(*item) + push_space > free_space)
2362 break;
2364 push_items++;
2365 push_space += this_item_size + sizeof(*item);
2366 if (i == 0)
2367 break;
2368 i--;
2371 if (push_items == 0)
2372 goto out_unlock;
2374 if (!empty && push_items == left_nritems)
2375 WARN_ON(1);
2377 /* push left to right */
2378 right_nritems = btrfs_header_nritems(right);
2380 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2381 push_space -= leaf_data_end(root, left);
2383 /* make room in the right data area */
2384 data_end = leaf_data_end(root, right);
2385 memmove_extent_buffer(right,
2386 btrfs_leaf_data(right) + data_end - push_space,
2387 btrfs_leaf_data(right) + data_end,
2388 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2390 /* copy from the left data area */
2391 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2392 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2393 btrfs_leaf_data(left) + leaf_data_end(root, left),
2394 push_space);
2396 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2397 btrfs_item_nr_offset(0),
2398 right_nritems * sizeof(struct btrfs_item));
2400 /* copy the items from left to right */
2401 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2402 btrfs_item_nr_offset(left_nritems - push_items),
2403 push_items * sizeof(struct btrfs_item));
2405 /* update the item pointers */
2406 right_nritems += push_items;
2407 btrfs_set_header_nritems(right, right_nritems);
2408 push_space = BTRFS_LEAF_DATA_SIZE(root);
2409 for (i = 0; i < right_nritems; i++) {
2410 item = btrfs_item_nr(right, i);
2411 push_space -= btrfs_item_size(right, item);
2412 btrfs_set_item_offset(right, item, push_space);
2415 left_nritems -= push_items;
2416 btrfs_set_header_nritems(left, left_nritems);
2418 if (left_nritems)
2419 btrfs_mark_buffer_dirty(left);
2420 else
2421 clean_tree_block(trans, root, left);
2423 btrfs_mark_buffer_dirty(right);
2425 btrfs_item_key(right, &disk_key, 0);
2426 btrfs_set_node_key(upper, &disk_key, slot + 1);
2427 btrfs_mark_buffer_dirty(upper);
2429 /* then fixup the leaf pointer in the path */
2430 if (path->slots[0] >= left_nritems) {
2431 path->slots[0] -= left_nritems;
2432 if (btrfs_header_nritems(path->nodes[0]) == 0)
2433 clean_tree_block(trans, root, path->nodes[0]);
2434 btrfs_tree_unlock(path->nodes[0]);
2435 free_extent_buffer(path->nodes[0]);
2436 path->nodes[0] = right;
2437 path->slots[1] += 1;
2438 } else {
2439 btrfs_tree_unlock(right);
2440 free_extent_buffer(right);
2442 return 0;
2444 out_unlock:
2445 btrfs_tree_unlock(right);
2446 free_extent_buffer(right);
2447 return 1;
2451 * push some data in the path leaf to the right, trying to free up at
2452 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2454 * returns 1 if the push failed because the other node didn't have enough
2455 * room, 0 if everything worked out and < 0 if there were major errors.
2457 * this will push starting from min_slot to the end of the leaf. It won't
2458 * push any slot lower than min_slot
2460 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2461 *root, struct btrfs_path *path,
2462 int min_data_size, int data_size,
2463 int empty, u32 min_slot)
2465 struct extent_buffer *left = path->nodes[0];
2466 struct extent_buffer *right;
2467 struct extent_buffer *upper;
2468 int slot;
2469 int free_space;
2470 u32 left_nritems;
2471 int ret;
2473 if (!path->nodes[1])
2474 return 1;
2476 slot = path->slots[1];
2477 upper = path->nodes[1];
2478 if (slot >= btrfs_header_nritems(upper) - 1)
2479 return 1;
2481 btrfs_assert_tree_locked(path->nodes[1]);
2483 right = read_node_slot(root, upper, slot + 1);
2484 if (right == NULL)
2485 return 1;
2487 btrfs_tree_lock(right);
2488 btrfs_set_lock_blocking(right);
2490 free_space = btrfs_leaf_free_space(root, right);
2491 if (free_space < data_size)
2492 goto out_unlock;
2494 /* cow and double check */
2495 ret = btrfs_cow_block(trans, root, right, upper,
2496 slot + 1, &right);
2497 if (ret)
2498 goto out_unlock;
2500 free_space = btrfs_leaf_free_space(root, right);
2501 if (free_space < data_size)
2502 goto out_unlock;
2504 left_nritems = btrfs_header_nritems(left);
2505 if (left_nritems == 0)
2506 goto out_unlock;
2508 return __push_leaf_right(trans, root, path, min_data_size, empty,
2509 right, free_space, left_nritems, min_slot);
2510 out_unlock:
2511 btrfs_tree_unlock(right);
2512 free_extent_buffer(right);
2513 return 1;
2517 * push some data in the path leaf to the left, trying to free up at
2518 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2520 * max_slot can put a limit on how far into the leaf we'll push items. The
2521 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2522 * items
2524 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2525 struct btrfs_root *root,
2526 struct btrfs_path *path, int data_size,
2527 int empty, struct extent_buffer *left,
2528 int free_space, u32 right_nritems,
2529 u32 max_slot)
2531 struct btrfs_disk_key disk_key;
2532 struct extent_buffer *right = path->nodes[0];
2533 int i;
2534 int push_space = 0;
2535 int push_items = 0;
2536 struct btrfs_item *item;
2537 u32 old_left_nritems;
2538 u32 nr;
2539 int ret = 0;
2540 int wret;
2541 u32 this_item_size;
2542 u32 old_left_item_size;
2544 if (empty)
2545 nr = min(right_nritems, max_slot);
2546 else
2547 nr = min(right_nritems - 1, max_slot);
2549 for (i = 0; i < nr; i++) {
2550 item = btrfs_item_nr(right, i);
2552 if (!empty && push_items > 0) {
2553 if (path->slots[0] < i)
2554 break;
2555 if (path->slots[0] == i) {
2556 int space = btrfs_leaf_free_space(root, right);
2557 if (space + push_space * 2 > free_space)
2558 break;
2562 if (path->slots[0] == i)
2563 push_space += data_size;
2565 this_item_size = btrfs_item_size(right, item);
2566 if (this_item_size + sizeof(*item) + push_space > free_space)
2567 break;
2569 push_items++;
2570 push_space += this_item_size + sizeof(*item);
2573 if (push_items == 0) {
2574 ret = 1;
2575 goto out;
2577 if (!empty && push_items == btrfs_header_nritems(right))
2578 WARN_ON(1);
2580 /* push data from right to left */
2581 copy_extent_buffer(left, right,
2582 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2583 btrfs_item_nr_offset(0),
2584 push_items * sizeof(struct btrfs_item));
2586 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2587 btrfs_item_offset_nr(right, push_items - 1);
2589 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2590 leaf_data_end(root, left) - push_space,
2591 btrfs_leaf_data(right) +
2592 btrfs_item_offset_nr(right, push_items - 1),
2593 push_space);
2594 old_left_nritems = btrfs_header_nritems(left);
2595 BUG_ON(old_left_nritems <= 0);
2597 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2598 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2599 u32 ioff;
2601 item = btrfs_item_nr(left, i);
2603 ioff = btrfs_item_offset(left, item);
2604 btrfs_set_item_offset(left, item,
2605 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2607 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2609 /* fixup right node */
2610 if (push_items > right_nritems) {
2611 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2612 right_nritems);
2613 WARN_ON(1);
2616 if (push_items < right_nritems) {
2617 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2618 leaf_data_end(root, right);
2619 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2620 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2621 btrfs_leaf_data(right) +
2622 leaf_data_end(root, right), push_space);
2624 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2625 btrfs_item_nr_offset(push_items),
2626 (btrfs_header_nritems(right) - push_items) *
2627 sizeof(struct btrfs_item));
2629 right_nritems -= push_items;
2630 btrfs_set_header_nritems(right, right_nritems);
2631 push_space = BTRFS_LEAF_DATA_SIZE(root);
2632 for (i = 0; i < right_nritems; i++) {
2633 item = btrfs_item_nr(right, i);
2635 push_space = push_space - btrfs_item_size(right, item);
2636 btrfs_set_item_offset(right, item, push_space);
2639 btrfs_mark_buffer_dirty(left);
2640 if (right_nritems)
2641 btrfs_mark_buffer_dirty(right);
2642 else
2643 clean_tree_block(trans, root, right);
2645 btrfs_item_key(right, &disk_key, 0);
2646 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2647 if (wret)
2648 ret = wret;
2650 /* then fixup the leaf pointer in the path */
2651 if (path->slots[0] < push_items) {
2652 path->slots[0] += old_left_nritems;
2653 btrfs_tree_unlock(path->nodes[0]);
2654 free_extent_buffer(path->nodes[0]);
2655 path->nodes[0] = left;
2656 path->slots[1] -= 1;
2657 } else {
2658 btrfs_tree_unlock(left);
2659 free_extent_buffer(left);
2660 path->slots[0] -= push_items;
2662 BUG_ON(path->slots[0] < 0);
2663 return ret;
2664 out:
2665 btrfs_tree_unlock(left);
2666 free_extent_buffer(left);
2667 return ret;
2671 * push some data in the path leaf to the left, trying to free up at
2672 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2674 * max_slot can put a limit on how far into the leaf we'll push items. The
2675 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2676 * items
2678 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2679 *root, struct btrfs_path *path, int min_data_size,
2680 int data_size, int empty, u32 max_slot)
2682 struct extent_buffer *right = path->nodes[0];
2683 struct extent_buffer *left;
2684 int slot;
2685 int free_space;
2686 u32 right_nritems;
2687 int ret = 0;
2689 slot = path->slots[1];
2690 if (slot == 0)
2691 return 1;
2692 if (!path->nodes[1])
2693 return 1;
2695 right_nritems = btrfs_header_nritems(right);
2696 if (right_nritems == 0)
2697 return 1;
2699 btrfs_assert_tree_locked(path->nodes[1]);
2701 left = read_node_slot(root, path->nodes[1], slot - 1);
2702 if (left == NULL)
2703 return 1;
2705 btrfs_tree_lock(left);
2706 btrfs_set_lock_blocking(left);
2708 free_space = btrfs_leaf_free_space(root, left);
2709 if (free_space < data_size) {
2710 ret = 1;
2711 goto out;
2714 /* cow and double check */
2715 ret = btrfs_cow_block(trans, root, left,
2716 path->nodes[1], slot - 1, &left);
2717 if (ret) {
2718 /* we hit -ENOSPC, but it isn't fatal here */
2719 ret = 1;
2720 goto out;
2723 free_space = btrfs_leaf_free_space(root, left);
2724 if (free_space < data_size) {
2725 ret = 1;
2726 goto out;
2729 return __push_leaf_left(trans, root, path, min_data_size,
2730 empty, left, free_space, right_nritems,
2731 max_slot);
2732 out:
2733 btrfs_tree_unlock(left);
2734 free_extent_buffer(left);
2735 return ret;
2739 * split the path's leaf in two, making sure there is at least data_size
2740 * available for the resulting leaf level of the path.
2742 * returns 0 if all went well and < 0 on failure.
2744 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2745 struct btrfs_root *root,
2746 struct btrfs_path *path,
2747 struct extent_buffer *l,
2748 struct extent_buffer *right,
2749 int slot, int mid, int nritems)
2751 int data_copy_size;
2752 int rt_data_off;
2753 int i;
2754 int ret = 0;
2755 int wret;
2756 struct btrfs_disk_key disk_key;
2758 nritems = nritems - mid;
2759 btrfs_set_header_nritems(right, nritems);
2760 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2762 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2763 btrfs_item_nr_offset(mid),
2764 nritems * sizeof(struct btrfs_item));
2766 copy_extent_buffer(right, l,
2767 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2768 data_copy_size, btrfs_leaf_data(l) +
2769 leaf_data_end(root, l), data_copy_size);
2771 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2772 btrfs_item_end_nr(l, mid);
2774 for (i = 0; i < nritems; i++) {
2775 struct btrfs_item *item = btrfs_item_nr(right, i);
2776 u32 ioff;
2778 ioff = btrfs_item_offset(right, item);
2779 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2782 btrfs_set_header_nritems(l, mid);
2783 ret = 0;
2784 btrfs_item_key(right, &disk_key, 0);
2785 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2786 path->slots[1] + 1, 1);
2787 if (wret)
2788 ret = wret;
2790 btrfs_mark_buffer_dirty(right);
2791 btrfs_mark_buffer_dirty(l);
2792 BUG_ON(path->slots[0] != slot);
2794 if (mid <= slot) {
2795 btrfs_tree_unlock(path->nodes[0]);
2796 free_extent_buffer(path->nodes[0]);
2797 path->nodes[0] = right;
2798 path->slots[0] -= mid;
2799 path->slots[1] += 1;
2800 } else {
2801 btrfs_tree_unlock(right);
2802 free_extent_buffer(right);
2805 BUG_ON(path->slots[0] < 0);
2807 return ret;
2811 * double splits happen when we need to insert a big item in the middle
2812 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2813 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2814 * A B C
2816 * We avoid this by trying to push the items on either side of our target
2817 * into the adjacent leaves. If all goes well we can avoid the double split
2818 * completely.
2820 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2821 struct btrfs_root *root,
2822 struct btrfs_path *path,
2823 int data_size)
2825 int ret;
2826 int progress = 0;
2827 int slot;
2828 u32 nritems;
2830 slot = path->slots[0];
2833 * try to push all the items after our slot into the
2834 * right leaf
2836 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2837 if (ret < 0)
2838 return ret;
2840 if (ret == 0)
2841 progress++;
2843 nritems = btrfs_header_nritems(path->nodes[0]);
2845 * our goal is to get our slot at the start or end of a leaf. If
2846 * we've done so we're done
2848 if (path->slots[0] == 0 || path->slots[0] == nritems)
2849 return 0;
2851 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2852 return 0;
2854 /* try to push all the items before our slot into the leaf on our left */
2855 slot = path->slots[0];
2856 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
2857 if (ret < 0)
2858 return ret;
2860 if (ret == 0)
2861 progress++;
2863 if (progress)
2864 return 0;
2865 return 1;
2869 * split the path's leaf in two, making sure there is at least data_size
2870 * available for the resulting leaf level of the path.
2872 * returns 0 if all went well and < 0 on failure.
2874 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2875 struct btrfs_root *root,
2876 struct btrfs_key *ins_key,
2877 struct btrfs_path *path, int data_size,
2878 int extend)
2880 struct btrfs_disk_key disk_key;
2881 struct extent_buffer *l;
2882 u32 nritems;
2883 int mid;
2884 int slot;
2885 struct extent_buffer *right;
2886 int ret = 0;
2887 int wret;
2888 int split;
2889 int num_doubles = 0;
2890 int tried_avoid_double = 0;
2892 l = path->nodes[0];
2893 slot = path->slots[0];
2894 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2895 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2896 return -EOVERFLOW;
2898 /* first try to make some room by pushing left and right */
2899 if (data_size) {
2900 wret = push_leaf_right(trans, root, path, data_size,
2901 data_size, 0, 0);
2902 if (wret < 0)
2903 return wret;
2904 if (wret) {
2905 wret = push_leaf_left(trans, root, path, data_size,
2906 data_size, 0, (u32)-1);
2907 if (wret < 0)
2908 return wret;
2910 l = path->nodes[0];
2912 /* did the pushes work? */
2913 if (btrfs_leaf_free_space(root, l) >= data_size)
2914 return 0;
2917 if (!path->nodes[1]) {
2918 ret = insert_new_root(trans, root, path, 1);
2919 if (ret)
2920 return ret;
2922 again:
2923 split = 1;
2924 l = path->nodes[0];
2925 slot = path->slots[0];
2926 nritems = btrfs_header_nritems(l);
2927 mid = (nritems + 1) / 2;
2929 if (mid <= slot) {
2930 if (nritems == 1 ||
2931 leaf_space_used(l, mid, nritems - mid) + data_size >
2932 BTRFS_LEAF_DATA_SIZE(root)) {
2933 if (slot >= nritems) {
2934 split = 0;
2935 } else {
2936 mid = slot;
2937 if (mid != nritems &&
2938 leaf_space_used(l, mid, nritems - mid) +
2939 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2940 if (data_size && !tried_avoid_double)
2941 goto push_for_double;
2942 split = 2;
2946 } else {
2947 if (leaf_space_used(l, 0, mid) + data_size >
2948 BTRFS_LEAF_DATA_SIZE(root)) {
2949 if (!extend && data_size && slot == 0) {
2950 split = 0;
2951 } else if ((extend || !data_size) && slot == 0) {
2952 mid = 1;
2953 } else {
2954 mid = slot;
2955 if (mid != nritems &&
2956 leaf_space_used(l, mid, nritems - mid) +
2957 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2958 if (data_size && !tried_avoid_double)
2959 goto push_for_double;
2960 split = 2;
2966 if (split == 0)
2967 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2968 else
2969 btrfs_item_key(l, &disk_key, mid);
2971 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2972 root->root_key.objectid,
2973 &disk_key, 0, l->start, 0, 0);
2974 if (IS_ERR(right))
2975 return PTR_ERR(right);
2977 root_add_used(root, root->leafsize);
2979 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2980 btrfs_set_header_bytenr(right, right->start);
2981 btrfs_set_header_generation(right, trans->transid);
2982 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2983 btrfs_set_header_owner(right, root->root_key.objectid);
2984 btrfs_set_header_level(right, 0);
2985 write_extent_buffer(right, root->fs_info->fsid,
2986 (unsigned long)btrfs_header_fsid(right),
2987 BTRFS_FSID_SIZE);
2989 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2990 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2991 BTRFS_UUID_SIZE);
2993 if (split == 0) {
2994 if (mid <= slot) {
2995 btrfs_set_header_nritems(right, 0);
2996 wret = insert_ptr(trans, root, path,
2997 &disk_key, right->start,
2998 path->slots[1] + 1, 1);
2999 if (wret)
3000 ret = wret;
3002 btrfs_tree_unlock(path->nodes[0]);
3003 free_extent_buffer(path->nodes[0]);
3004 path->nodes[0] = right;
3005 path->slots[0] = 0;
3006 path->slots[1] += 1;
3007 } else {
3008 btrfs_set_header_nritems(right, 0);
3009 wret = insert_ptr(trans, root, path,
3010 &disk_key,
3011 right->start,
3012 path->slots[1], 1);
3013 if (wret)
3014 ret = wret;
3015 btrfs_tree_unlock(path->nodes[0]);
3016 free_extent_buffer(path->nodes[0]);
3017 path->nodes[0] = right;
3018 path->slots[0] = 0;
3019 if (path->slots[1] == 0) {
3020 wret = fixup_low_keys(trans, root,
3021 path, &disk_key, 1);
3022 if (wret)
3023 ret = wret;
3026 btrfs_mark_buffer_dirty(right);
3027 return ret;
3030 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3031 BUG_ON(ret);
3033 if (split == 2) {
3034 BUG_ON(num_doubles != 0);
3035 num_doubles++;
3036 goto again;
3039 return ret;
3041 push_for_double:
3042 push_for_double_split(trans, root, path, data_size);
3043 tried_avoid_double = 1;
3044 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3045 return 0;
3046 goto again;
3049 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3050 struct btrfs_root *root,
3051 struct btrfs_path *path, int ins_len)
3053 struct btrfs_key key;
3054 struct extent_buffer *leaf;
3055 struct btrfs_file_extent_item *fi;
3056 u64 extent_len = 0;
3057 u32 item_size;
3058 int ret;
3060 leaf = path->nodes[0];
3061 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3063 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3064 key.type != BTRFS_EXTENT_CSUM_KEY);
3066 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3067 return 0;
3069 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3070 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3071 fi = btrfs_item_ptr(leaf, path->slots[0],
3072 struct btrfs_file_extent_item);
3073 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3075 btrfs_release_path(path);
3077 path->keep_locks = 1;
3078 path->search_for_split = 1;
3079 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3080 path->search_for_split = 0;
3081 if (ret < 0)
3082 goto err;
3084 ret = -EAGAIN;
3085 leaf = path->nodes[0];
3086 /* if our item isn't there or got smaller, return now */
3087 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3088 goto err;
3090 /* the leaf has changed, it now has room. return now */
3091 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3092 goto err;
3094 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3095 fi = btrfs_item_ptr(leaf, path->slots[0],
3096 struct btrfs_file_extent_item);
3097 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3098 goto err;
3101 btrfs_set_path_blocking(path);
3102 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3103 if (ret)
3104 goto err;
3106 path->keep_locks = 0;
3107 btrfs_unlock_up_safe(path, 1);
3108 return 0;
3109 err:
3110 path->keep_locks = 0;
3111 return ret;
3114 static noinline int split_item(struct btrfs_trans_handle *trans,
3115 struct btrfs_root *root,
3116 struct btrfs_path *path,
3117 struct btrfs_key *new_key,
3118 unsigned long split_offset)
3120 struct extent_buffer *leaf;
3121 struct btrfs_item *item;
3122 struct btrfs_item *new_item;
3123 int slot;
3124 char *buf;
3125 u32 nritems;
3126 u32 item_size;
3127 u32 orig_offset;
3128 struct btrfs_disk_key disk_key;
3130 leaf = path->nodes[0];
3131 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3133 btrfs_set_path_blocking(path);
3135 item = btrfs_item_nr(leaf, path->slots[0]);
3136 orig_offset = btrfs_item_offset(leaf, item);
3137 item_size = btrfs_item_size(leaf, item);
3139 buf = kmalloc(item_size, GFP_NOFS);
3140 if (!buf)
3141 return -ENOMEM;
3143 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3144 path->slots[0]), item_size);
3146 slot = path->slots[0] + 1;
3147 nritems = btrfs_header_nritems(leaf);
3148 if (slot != nritems) {
3149 /* shift the items */
3150 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3151 btrfs_item_nr_offset(slot),
3152 (nritems - slot) * sizeof(struct btrfs_item));
3155 btrfs_cpu_key_to_disk(&disk_key, new_key);
3156 btrfs_set_item_key(leaf, &disk_key, slot);
3158 new_item = btrfs_item_nr(leaf, slot);
3160 btrfs_set_item_offset(leaf, new_item, orig_offset);
3161 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3163 btrfs_set_item_offset(leaf, item,
3164 orig_offset + item_size - split_offset);
3165 btrfs_set_item_size(leaf, item, split_offset);
3167 btrfs_set_header_nritems(leaf, nritems + 1);
3169 /* write the data for the start of the original item */
3170 write_extent_buffer(leaf, buf,
3171 btrfs_item_ptr_offset(leaf, path->slots[0]),
3172 split_offset);
3174 /* write the data for the new item */
3175 write_extent_buffer(leaf, buf + split_offset,
3176 btrfs_item_ptr_offset(leaf, slot),
3177 item_size - split_offset);
3178 btrfs_mark_buffer_dirty(leaf);
3180 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3181 kfree(buf);
3182 return 0;
3186 * This function splits a single item into two items,
3187 * giving 'new_key' to the new item and splitting the
3188 * old one at split_offset (from the start of the item).
3190 * The path may be released by this operation. After
3191 * the split, the path is pointing to the old item. The
3192 * new item is going to be in the same node as the old one.
3194 * Note, the item being split must be small enough to live alone on
3195 * a tree block with room for one extra struct btrfs_item
3197 * This allows us to split the item in place, keeping a lock on the
3198 * leaf the entire time.
3200 int btrfs_split_item(struct btrfs_trans_handle *trans,
3201 struct btrfs_root *root,
3202 struct btrfs_path *path,
3203 struct btrfs_key *new_key,
3204 unsigned long split_offset)
3206 int ret;
3207 ret = setup_leaf_for_split(trans, root, path,
3208 sizeof(struct btrfs_item));
3209 if (ret)
3210 return ret;
3212 ret = split_item(trans, root, path, new_key, split_offset);
3213 return ret;
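/*
 * Hypothetical caller (editorial): split the current item so its first
 * 'split_offset' bytes keep the old key and the remainder moves under
 * 'new_key'.  setup_leaf_for_split() above can return -EAGAIN when the
 * item moved while the path was released, so callers usually re-search
 * the key and retry in that case.
 */
static int split_item_retry_example(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    unsigned long split_offset)
{
	int ret;

	ret = btrfs_split_item(trans, root, path, new_key, split_offset);
	/* -EAGAIN: leaf changed under us; re-search the key and call again */
	return ret;
}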
3217 * This function duplicates an item, giving 'new_key' to the new item.
3218 * It guarantees both items live in the same tree leaf and the new item
3219 * is contiguous with the original item.
3221 * This allows us to split a file extent in place, keeping a lock on the
3222 * leaf the entire time.
3224 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3225 struct btrfs_root *root,
3226 struct btrfs_path *path,
3227 struct btrfs_key *new_key)
3229 struct extent_buffer *leaf;
3230 int ret;
3231 u32 item_size;
3233 leaf = path->nodes[0];
3234 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3235 ret = setup_leaf_for_split(trans, root, path,
3236 item_size + sizeof(struct btrfs_item));
3237 if (ret)
3238 return ret;
3240 path->slots[0]++;
3241 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3242 item_size, item_size +
3243 sizeof(struct btrfs_item), 1);
3244 BUG_ON(ret);
3246 leaf = path->nodes[0];
3247 memcpy_extent_buffer(leaf,
3248 btrfs_item_ptr_offset(leaf, path->slots[0]),
3249 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3250 item_size);
3251 return 0;
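/*
 * Hypothetical caller (editorial): clone the item the path points at
 * under an invented neighbouring key.  After the call the path points at
 * the copy, which can then be edited independently -- the pattern used
 * when splitting file extents in place.
 */
static int clone_item_example(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct btrfs_key *orig_key)
{
	struct btrfs_key new_key = *orig_key;

	new_key.offset += 4096;		/* made-up offset for the copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}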
3255 * make the item pointed to by the path smaller. new_size indicates
3256 * how small to make it, and from_end tells us if we just chop bytes
3257 * off the end of the item or if we shift the item to chop bytes off
3258 * the front.
3260 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3261 struct btrfs_root *root,
3262 struct btrfs_path *path,
3263 u32 new_size, int from_end)
3265 int slot;
3266 struct extent_buffer *leaf;
3267 struct btrfs_item *item;
3268 u32 nritems;
3269 unsigned int data_end;
3270 unsigned int old_data_start;
3271 unsigned int old_size;
3272 unsigned int size_diff;
3273 int i;
3275 leaf = path->nodes[0];
3276 slot = path->slots[0];
3278 old_size = btrfs_item_size_nr(leaf, slot);
3279 if (old_size == new_size)
3280 return 0;
3282 nritems = btrfs_header_nritems(leaf);
3283 data_end = leaf_data_end(root, leaf);
3285 old_data_start = btrfs_item_offset_nr(leaf, slot);
3287 size_diff = old_size - new_size;
3289 BUG_ON(slot < 0);
3290 BUG_ON(slot >= nritems);
3293 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3295 /* first correct the data pointers */
3296 for (i = slot; i < nritems; i++) {
3297 u32 ioff;
3298 item = btrfs_item_nr(leaf, i);
3300 ioff = btrfs_item_offset(leaf, item);
3301 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3304 /* shift the data */
3305 if (from_end) {
3306 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3307 data_end + size_diff, btrfs_leaf_data(leaf) +
3308 data_end, old_data_start + new_size - data_end);
3309 } else {
3310 struct btrfs_disk_key disk_key;
3311 u64 offset;
3313 btrfs_item_key(leaf, &disk_key, slot);
3315 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3316 unsigned long ptr;
3317 struct btrfs_file_extent_item *fi;
3319 fi = btrfs_item_ptr(leaf, slot,
3320 struct btrfs_file_extent_item);
3321 fi = (struct btrfs_file_extent_item *)(
3322 (unsigned long)fi - size_diff);
3324 if (btrfs_file_extent_type(leaf, fi) ==
3325 BTRFS_FILE_EXTENT_INLINE) {
3326 ptr = btrfs_item_ptr_offset(leaf, slot);
3327 memmove_extent_buffer(leaf, ptr,
3328 (unsigned long)fi,
3329 offsetof(struct btrfs_file_extent_item,
3330 disk_bytenr));
3334 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3335 data_end + size_diff, btrfs_leaf_data(leaf) +
3336 data_end, old_data_start - data_end);
3338 offset = btrfs_disk_key_offset(&disk_key);
3339 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3340 btrfs_set_item_key(leaf, &disk_key, slot);
3341 if (slot == 0)
3342 fixup_low_keys(trans, root, path, &disk_key, 1);
3345 item = btrfs_item_nr(leaf, slot);
3346 btrfs_set_item_size(leaf, item, new_size);
3347 btrfs_mark_buffer_dirty(leaf);
3349 if (btrfs_leaf_free_space(root, leaf) < 0) {
3350 btrfs_print_leaf(root, leaf);
3351 BUG();
3353 return 0;
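/*
 * Editorial sketch: shrink the item under the path to 'new_size',
 * keeping the front of the payload (from_end = 1).  Passing from_end = 0
 * instead keeps the tail, shifting the item and bumping the key offset
 * as the function above shows for inline extents.
 */
static void shrink_item_example(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path, u32 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (new_size < old_size)
		btrfs_truncate_item(trans, root, path, new_size, 1);
}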
3357 * make the item pointed to by the path bigger, data_size is the number of bytes to add.
3359 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3360 struct btrfs_root *root, struct btrfs_path *path,
3361 u32 data_size)
3363 int slot;
3364 struct extent_buffer *leaf;
3365 struct btrfs_item *item;
3366 u32 nritems;
3367 unsigned int data_end;
3368 unsigned int old_data;
3369 unsigned int old_size;
3370 int i;
3372 leaf = path->nodes[0];
3374 nritems = btrfs_header_nritems(leaf);
3375 data_end = leaf_data_end(root, leaf);
3377 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3378 btrfs_print_leaf(root, leaf);
3379 BUG();
3381 slot = path->slots[0];
3382 old_data = btrfs_item_end_nr(leaf, slot);
3384 BUG_ON(slot < 0);
3385 if (slot >= nritems) {
3386 btrfs_print_leaf(root, leaf);
3387 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3388 slot, nritems);
3389 BUG_ON(1);
3393 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3395 /* first correct the data pointers */
3396 for (i = slot; i < nritems; i++) {
3397 u32 ioff;
3398 item = btrfs_item_nr(leaf, i);
3400 ioff = btrfs_item_offset(leaf, item);
3401 btrfs_set_item_offset(leaf, item, ioff - data_size);
3404 /* shift the data */
3405 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3406 data_end - data_size, btrfs_leaf_data(leaf) +
3407 data_end, old_data - data_end);
3409 data_end = old_data;
3410 old_size = btrfs_item_size_nr(leaf, slot);
3411 item = btrfs_item_nr(leaf, slot);
3412 btrfs_set_item_size(leaf, item, old_size + data_size);
3413 btrfs_mark_buffer_dirty(leaf);
3415 if (btrfs_leaf_free_space(root, leaf) < 0) {
3416 btrfs_print_leaf(root, leaf);
3417 BUG();
3419 return 0;
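/*
 * Editorial sketch: grow the item under the path by 'extra' bytes and
 * append 'buf' into the new space.  Assumes the caller searched to the
 * item with cow=1 and already knows the leaf has room -- as the code
 * above shows, btrfs_extend_item() BUG()s when it does not.
 */
static void append_to_item_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   const void *buf, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(trans, root, path, extra);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, buf, ptr + old_size, extra);
	btrfs_mark_buffer_dirty(leaf);
}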
3423 * Given a key and some data, insert items into the tree.
3424 * This does all the path init required, making room in the tree if needed.
3425 * Returns the number of keys that were inserted.
3427 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3428 struct btrfs_root *root,
3429 struct btrfs_path *path,
3430 struct btrfs_key *cpu_key, u32 *data_size,
3431 int nr)
3433 struct extent_buffer *leaf;
3434 struct btrfs_item *item;
3435 int ret = 0;
3436 int slot;
3437 int i;
3438 u32 nritems;
3439 u32 total_data = 0;
3440 u32 total_size = 0;
3441 unsigned int data_end;
3442 struct btrfs_disk_key disk_key;
3443 struct btrfs_key found_key;
3445 for (i = 0; i < nr; i++) {
3446 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3447 BTRFS_LEAF_DATA_SIZE(root)) {
3448 nr = i;
3449 break;
3451 total_data += data_size[i];
3452 total_size += data_size[i] + sizeof(struct btrfs_item);
3454 BUG_ON(nr == 0);
3456 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3457 if (ret == 0)
3458 return -EEXIST;
3459 if (ret < 0)
3460 goto out;
3462 leaf = path->nodes[0];
3464 nritems = btrfs_header_nritems(leaf);
3465 data_end = leaf_data_end(root, leaf);
3467 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3468 for (i = nr - 1; i >= 0; i--) {
3469 total_data -= data_size[i];
3470 total_size -= data_size[i] + sizeof(struct btrfs_item);
3471 if (total_size < btrfs_leaf_free_space(root, leaf))
3472 break;
3474 nr = i;
3477 slot = path->slots[0];
3478 BUG_ON(slot < 0);
3480 if (slot != nritems) {
3481 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3483 item = btrfs_item_nr(leaf, slot);
3484 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3486 /* figure out how many keys we can insert in here */
3487 total_data = data_size[0];
3488 for (i = 1; i < nr; i++) {
3489 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3490 break;
3491 total_data += data_size[i];
3493 nr = i;
3495 if (old_data < data_end) {
3496 btrfs_print_leaf(root, leaf);
3497 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3498 slot, old_data, data_end);
3499 BUG_ON(1);
3502 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3504 /* first correct the data pointers */
3505 for (i = slot; i < nritems; i++) {
3506 u32 ioff;
3508 item = btrfs_item_nr(leaf, i);
3509 ioff = btrfs_item_offset(leaf, item);
3510 btrfs_set_item_offset(leaf, item, ioff - total_data);
3512 /* shift the items */
3513 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3514 btrfs_item_nr_offset(slot),
3515 (nritems - slot) * sizeof(struct btrfs_item));
3517 /* shift the data */
3518 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3519 data_end - total_data, btrfs_leaf_data(leaf) +
3520 data_end, old_data - data_end);
3521 data_end = old_data;
3522 } else {
3524 * this sucks but it has to be done, if we are inserting at
3525 * the end of the leaf only insert 1 of the items, since we
3526 * have no way of knowing what's on the next leaf and we'd have
3527 * to drop our current locks to figure it out
3529 nr = 1;
3532 /* setup the item for the new data */
3533 for (i = 0; i < nr; i++) {
3534 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3535 btrfs_set_item_key(leaf, &disk_key, slot + i);
3536 item = btrfs_item_nr(leaf, slot + i);
3537 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3538 data_end -= data_size[i];
3539 btrfs_set_item_size(leaf, item, data_size[i]);
3541 btrfs_set_header_nritems(leaf, nritems + nr);
3542 btrfs_mark_buffer_dirty(leaf);
3544 ret = 0;
3545 if (slot == 0) {
3546 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3547 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3550 if (btrfs_leaf_free_space(root, leaf) < 0) {
3551 btrfs_print_leaf(root, leaf);
3552 BUG();
3554 out:
3555 if (!ret)
3556 ret = nr;
3557 return ret;
3561 * this is a helper for btrfs_insert_empty_items; the main goal here is
3562 * to save stack depth by doing the bulk of the work in a function
3563 * that doesn't call btrfs_search_slot
3565 int setup_items_for_insert(struct btrfs_trans_handle *trans,
3566 struct btrfs_root *root, struct btrfs_path *path,
3567 struct btrfs_key *cpu_key, u32 *data_size,
3568 u32 total_data, u32 total_size, int nr)
3570 struct btrfs_item *item;
3571 int i;
3572 u32 nritems;
3573 unsigned int data_end;
3574 struct btrfs_disk_key disk_key;
3575 int ret;
3576 struct extent_buffer *leaf;
3577 int slot;
3579 leaf = path->nodes[0];
3580 slot = path->slots[0];
3582 nritems = btrfs_header_nritems(leaf);
3583 data_end = leaf_data_end(root, leaf);
3585 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3586 btrfs_print_leaf(root, leaf);
3587 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3588 total_size, btrfs_leaf_free_space(root, leaf));
3589 BUG();
3592 if (slot != nritems) {
3593 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3595 if (old_data < data_end) {
3596 btrfs_print_leaf(root, leaf);
3597 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3598 slot, old_data, data_end);
3599 BUG_ON(1);
3602 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3604 /* first correct the data pointers */
3605 for (i = slot; i < nritems; i++) {
3606 u32 ioff;
3608 item = btrfs_item_nr(leaf, i);
3609 ioff = btrfs_item_offset(leaf, item);
3610 btrfs_set_item_offset(leaf, item, ioff - total_data);
3612 /* shift the items */
3613 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3614 btrfs_item_nr_offset(slot),
3615 (nritems - slot) * sizeof(struct btrfs_item));
3617 /* shift the data */
3618 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3619 data_end - total_data, btrfs_leaf_data(leaf) +
3620 data_end, old_data - data_end);
3621 data_end = old_data;
3624 /* setup the item for the new data */
3625 for (i = 0; i < nr; i++) {
3626 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3627 btrfs_set_item_key(leaf, &disk_key, slot + i);
3628 item = btrfs_item_nr(leaf, slot + i);
3629 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3630 data_end -= data_size[i];
3631 btrfs_set_item_size(leaf, item, data_size[i]);
3634 btrfs_set_header_nritems(leaf, nritems + nr);
3636 ret = 0;
3637 if (slot == 0) {
3638 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3639 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3641 btrfs_unlock_up_safe(path, 1);
3642 btrfs_mark_buffer_dirty(leaf);
3644 if (btrfs_leaf_free_space(root, leaf) < 0) {
3645 btrfs_print_leaf(root, leaf);
3646 BUG();
3648 return ret;
3652 * Given a key and some data, insert items into the tree.
3653 * This does all the path init required, making room in the tree if needed.
3655 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3656 struct btrfs_root *root,
3657 struct btrfs_path *path,
3658 struct btrfs_key *cpu_key, u32 *data_size,
3659 int nr)
3661 int ret = 0;
3662 int slot;
3663 int i;
3664 u32 total_size = 0;
3665 u32 total_data = 0;
3667 for (i = 0; i < nr; i++)
3668 total_data += data_size[i];
3670 total_size = total_data + (nr * sizeof(struct btrfs_item));
3671 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3672 if (ret == 0)
3673 return -EEXIST;
3674 if (ret < 0)
3675 goto out;
3677 slot = path->slots[0];
3678 BUG_ON(slot < 0);
3680 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3681 total_data, total_size, nr);
3683 out:
3684 return ret;
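/*
 * Illustrative caller (editorial): reserve room for two adjacent items
 * in a single search, then fill them through the returned path.  The
 * key array and payload sizes are invented for the example.
 */
static int insert_two_items_example(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_key *keys /* two keys */)
{
	struct btrfs_path *path;
	u32 sizes[2] = { 16, 32 };	/* assumed payload sizes */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (!ret) {
		/* path now points at the first new item; write payloads here */
		btrfs_mark_buffer_dirty(path->nodes[0]);
	}
	btrfs_free_path(path);
	return ret;
}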
3688 * Given a key and some data, insert an item into the tree.
3689 * This does all the path init required, making room in the tree if needed.
3691 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3692 *root, struct btrfs_key *cpu_key, void *data, u32
3693 data_size)
3695 int ret = 0;
3696 struct btrfs_path *path;
3697 struct extent_buffer *leaf;
3698 unsigned long ptr;
3700 path = btrfs_alloc_path();
3701 if (!path)
3702 return -ENOMEM;
3703 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3704 if (!ret) {
3705 leaf = path->nodes[0];
3706 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3707 write_extent_buffer(leaf, data, ptr, data_size);
3708 btrfs_mark_buffer_dirty(leaf);
3710 btrfs_free_path(path);
3711 return ret;
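/*
 * Hypothetical usage (editorial; the key values are invented and do not
 * form a valid filesystem item): insert a small blob under a
 * caller-chosen key.  btrfs_insert_item() allocates and frees the path
 * internally, so the caller only supplies the key and the raw bytes.
 */
static int insert_blob_example(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_key key;
	char data[] = "example payload";

	key.objectid = 256;		/* made-up values */
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	return btrfs_insert_item(trans, root, &key, data, sizeof(data));
}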
3715 * delete the pointer from a given node.
3717 * the tree should have been previously balanced so the deletion does not
3718 * empty a node.
3720 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3721 struct btrfs_path *path, int level, int slot)
3723 struct extent_buffer *parent = path->nodes[level];
3724 u32 nritems;
3725 int ret = 0;
3726 int wret;
3728 nritems = btrfs_header_nritems(parent);
3729 if (slot != nritems - 1) {
3730 memmove_extent_buffer(parent,
3731 btrfs_node_key_ptr_offset(slot),
3732 btrfs_node_key_ptr_offset(slot + 1),
3733 sizeof(struct btrfs_key_ptr) *
3734 (nritems - slot - 1));
3736 nritems--;
3737 btrfs_set_header_nritems(parent, nritems);
3738 if (nritems == 0 && parent == root->node) {
3739 BUG_ON(btrfs_header_level(root->node) != 1);
3740 /* just turn the root into a leaf and break */
3741 btrfs_set_header_level(root->node, 0);
3742 } else if (slot == 0) {
3743 struct btrfs_disk_key disk_key;
3745 btrfs_node_key(parent, &disk_key, 0);
3746 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3747 if (wret)
3748 ret = wret;
3750 btrfs_mark_buffer_dirty(parent);
3751 return ret;
3755 * a helper function to delete the leaf pointed to by path->slots[1] and
3756 * path->nodes[1].
3758 * This deletes the pointer in path->nodes[1] and frees the leaf
3759 * block extent. zero is returned if it all worked out, < 0 otherwise.
3761 * The path must have already been setup for deleting the leaf, including
3762 * all the proper balancing. path->nodes[1] must be locked.
3764 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3765 struct btrfs_root *root,
3766 struct btrfs_path *path,
3767 struct extent_buffer *leaf)
3769 int ret;
3771 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3772 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3773 if (ret)
3774 return ret;
3777 * btrfs_free_extent is expensive, we want to make sure we
3778 * aren't holding any locks when we call it
3780 btrfs_unlock_up_safe(path, 0);
3782 root_sub_used(root, leaf->len);
3784 btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
3785 return 0;
3788 * delete the item at the leaf level in path. If that empties
3789 * the leaf, remove it from the tree
3791 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3792 struct btrfs_path *path, int slot, int nr)
3794 struct extent_buffer *leaf;
3795 struct btrfs_item *item;
3796 int last_off;
3797 int dsize = 0;
3798 int ret = 0;
3799 int wret;
3800 int i;
3801 u32 nritems;
3803 leaf = path->nodes[0];
3804 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3806 for (i = 0; i < nr; i++)
3807 dsize += btrfs_item_size_nr(leaf, slot + i);
3809 nritems = btrfs_header_nritems(leaf);
3811 if (slot + nr != nritems) {
3812 int data_end = leaf_data_end(root, leaf);
3814 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3815 data_end + dsize,
3816 btrfs_leaf_data(leaf) + data_end,
3817 last_off - data_end);
3819 for (i = slot + nr; i < nritems; i++) {
3820 u32 ioff;
3822 item = btrfs_item_nr(leaf, i);
3823 ioff = btrfs_item_offset(leaf, item);
3824 btrfs_set_item_offset(leaf, item, ioff + dsize);
3827 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3828 btrfs_item_nr_offset(slot + nr),
3829 sizeof(struct btrfs_item) *
3830 (nritems - slot - nr));
3832 btrfs_set_header_nritems(leaf, nritems - nr);
3833 nritems -= nr;
3835 /* delete the leaf if we've emptied it */
3836 if (nritems == 0) {
3837 if (leaf == root->node) {
3838 btrfs_set_header_level(leaf, 0);
3839 } else {
3840 btrfs_set_path_blocking(path);
3841 clean_tree_block(trans, root, leaf);
3842 ret = btrfs_del_leaf(trans, root, path, leaf);
3843 BUG_ON(ret);
3845 } else {
3846 int used = leaf_space_used(leaf, 0, nritems);
3847 if (slot == 0) {
3848 struct btrfs_disk_key disk_key;
3850 btrfs_item_key(leaf, &disk_key, 0);
3851 wret = fixup_low_keys(trans, root, path,
3852 &disk_key, 1);
3853 if (wret)
3854 ret = wret;
3857 /* delete the leaf if it is mostly empty */
3858 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3859 /* push_leaf_left fixes the path.
3860 * make sure the path still points to our leaf
3861 * for a possible call to del_ptr below
3863 slot = path->slots[1];
3864 extent_buffer_get(leaf);
3866 btrfs_set_path_blocking(path);
3867 wret = push_leaf_left(trans, root, path, 1, 1,
3868 1, (u32)-1);
3869 if (wret < 0 && wret != -ENOSPC)
3870 ret = wret;
3872 if (path->nodes[0] == leaf &&
3873 btrfs_header_nritems(leaf)) {
3874 wret = push_leaf_right(trans, root, path, 1,
3875 1, 1, 0);
3876 if (wret < 0 && wret != -ENOSPC)
3877 ret = wret;
3880 if (btrfs_header_nritems(leaf) == 0) {
3881 path->slots[1] = slot;
3882 ret = btrfs_del_leaf(trans, root, path, leaf);
3883 BUG_ON(ret);
3884 free_extent_buffer(leaf);
3885 } else {
3886 /* if we're still in the path, make sure
3887 * we're dirty. Otherwise, one of the
3888 * push_leaf functions must have already
3889 * dirtied this buffer
3891 if (path->nodes[0] == leaf)
3892 btrfs_mark_buffer_dirty(leaf);
3893 free_extent_buffer(leaf);
3895 } else {
3896 btrfs_mark_buffer_dirty(leaf);
3899 return ret;
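/*
 * Hypothetical caller (editorial): look up an exact key with a
 * write-locked path (ins_len == -1 prepares the tree for deletion) and
 * drop that single item.
 */
static int delete_one_item_example(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not present */
	btrfs_free_path(path);
	return ret;
}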
3903 * search the tree again to find a leaf with lesser keys
3904 * returns 0 if it found something or 1 if there are no lesser leaves.
3905 * returns < 0 on io errors.
3907 * This may release the path, and so you may lose any locks held at the
3908 * time you call it.
3910 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3912 struct btrfs_key key;
3913 struct btrfs_disk_key found_key;
3914 int ret;
3916 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3918 if (key.offset > 0)
3919 key.offset--;
3920 else if (key.type > 0)
3921 key.type--;
3922 else if (key.objectid > 0)
3923 key.objectid--;
3924 else
3925 return 1;
3927 btrfs_release_path(path);
3928 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3929 if (ret < 0)
3930 return ret;
3931 btrfs_item_key(path->nodes[0], &found_key, 0);
3932 ret = comp_keys(&found_key, &key);
3933 if (ret < 0)
3934 return 0;
3935 return 1;
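/*
 * Editorial sketch: step backwards through the tree one leaf at a time.
 * As the comment above warns, btrfs_prev_leaf() releases the path, so
 * nothing about the old leaf may be cached across the call.
 */
static int walk_backwards_example(struct btrfs_root *root,
				  struct btrfs_path *path)
{
	int ret;

	while (1) {
		ret = btrfs_prev_leaf(root, path);
		if (ret)	/* 1: no lesser leaf; < 0: io error */
			return ret < 0 ? ret : 0;
		/* ... visit items in the leaf at path->nodes[0] ... */
	}
}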
3939 * A helper function to walk down the tree starting at min_key, and looking
3940 * for nodes or leaves that are either in cache or have a minimum
3941 * transaction id. This is used by the btree defrag code, and tree logging
3943 * This does not cow, but it does stuff the starting key it finds back
3944 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3945 * key and get a writable path.
3947 * This does lock as it descends, and path->keep_locks should be set
3948 * to 1 by the caller.
3950 * This honors path->lowest_level to prevent descent past a given level
3951 * of the tree.
3953 * min_trans indicates the oldest transaction that you are interested
3954 * in walking through. Any nodes or leaves older than min_trans are
3955 * skipped over (without reading them).
3957 * returns zero if something useful was found, < 0 on error and 1 if there
3958 * was nothing in the tree that matched the search criteria.
3960 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3961 struct btrfs_key *max_key,
3962 struct btrfs_path *path, int cache_only,
3963 u64 min_trans)
3965 struct extent_buffer *cur;
3966 struct btrfs_key found_key;
3967 int slot;
3968 int sret;
3969 u32 nritems;
3970 int level;
3971 int ret = 1;
3973 WARN_ON(!path->keep_locks);
3974 again:
3975 cur = btrfs_read_lock_root_node(root);
3976 level = btrfs_header_level(cur);
3977 WARN_ON(path->nodes[level]);
3978 path->nodes[level] = cur;
3979 path->locks[level] = BTRFS_READ_LOCK;
3981 if (btrfs_header_generation(cur) < min_trans) {
3982 ret = 1;
3983 goto out;
3985 while (1) {
3986 nritems = btrfs_header_nritems(cur);
3987 level = btrfs_header_level(cur);
3988 sret = bin_search(cur, min_key, level, &slot);
3990 /* at the lowest level, we're done, setup the path and exit */
3991 if (level == path->lowest_level) {
3992 if (slot >= nritems)
3993 goto find_next_key;
3994 ret = 0;
3995 path->slots[level] = slot;
3996 btrfs_item_key_to_cpu(cur, &found_key, slot);
3997 goto out;
3999 if (sret && slot > 0)
4000 slot--;
4002 * check this node pointer against the cache_only and
4003 * min_trans parameters. If it isn't in cache or is too
4004 * old, skip to the next one.
4006 while (slot < nritems) {
4007 u64 blockptr;
4008 u64 gen;
4009 struct extent_buffer *tmp;
4010 struct btrfs_disk_key disk_key;
4012 blockptr = btrfs_node_blockptr(cur, slot);
4013 gen = btrfs_node_ptr_generation(cur, slot);
4014 if (gen < min_trans) {
4015 slot++;
4016 continue;
4018 if (!cache_only)
4019 break;
4021 if (max_key) {
4022 btrfs_node_key(cur, &disk_key, slot);
4023 if (comp_keys(&disk_key, max_key) >= 0) {
4024 ret = 1;
4025 goto out;
4029 tmp = btrfs_find_tree_block(root, blockptr,
4030 btrfs_level_size(root, level - 1));
4032 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4033 free_extent_buffer(tmp);
4034 break;
4036 if (tmp)
4037 free_extent_buffer(tmp);
4038 slot++;
4040 find_next_key:
4042 * we didn't find a candidate key in this node, walk forward
4043 * and find another one
4045 if (slot >= nritems) {
4046 path->slots[level] = slot;
4047 btrfs_set_path_blocking(path);
4048 sret = btrfs_find_next_key(root, path, min_key, level,
4049 cache_only, min_trans);
4050 if (sret == 0) {
4051 btrfs_release_path(path);
4052 goto again;
4053 } else {
4054 goto out;
4057 /* save our key for returning back */
4058 btrfs_node_key_to_cpu(cur, &found_key, slot);
4059 path->slots[level] = slot;
4060 if (level == path->lowest_level) {
4061 ret = 0;
4062 unlock_up(path, level, 1);
4063 goto out;
4065 btrfs_set_path_blocking(path);
4066 cur = read_node_slot(root, cur, slot);
4067 BUG_ON(!cur);
4069 btrfs_tree_read_lock(cur);
4071 path->locks[level - 1] = BTRFS_READ_LOCK;
4072 path->nodes[level - 1] = cur;
4073 unlock_up(path, level, 1);
4074 btrfs_clear_path_blocking(path, NULL, 0);
4076 out:
4077 if (ret == 0)
4078 memcpy(min_key, &found_key, sizeof(found_key));
4079 btrfs_set_path_blocking(path);
4080 return ret;
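/*
 * Illustrative loop (editorial): visit only the parts of a tree newer
 * than 'min_trans', restarting from the key stuffed back into min_key,
 * the way the defrag and tree-log callers described above do.
 * keep_locks must be set, matching the WARN_ON in the function.
 */
static int scan_newer_than_example(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;
	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	while (1) {
		ret = btrfs_search_forward(root, &min_key, NULL, path,
					   0, min_trans);
		if (ret)	/* 1: nothing left; < 0: error */
			break;
		/* ... act on the item or node found at min_key ... */
		btrfs_release_path(path);
		min_key.offset++;	/* naive advance; real callers are smarter */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}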
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
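
/*
 * Illustrative sketch, not part of the original file: peeking at the key
 * that follows a search position, combining a keep_locks search with
 * btrfs_find_next_key() as the comment above describes.  The helper name
 * example_peek_next_key is hypothetical.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 struct btrfs_key *search_key,
				 struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* keep_locks holds the upper levels locked so we can walk right */
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* level 0, no cache_only filter, no minimum transid */
	ret = btrfs_find_next_key(root, path, next_key, 0, 0, 0);
out:
	btrfs_free_path(path);
	return ret;
}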
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
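
/*
 * Illustrative sketch, not part of the original file: the common pattern
 * for iterating every item of one objectid, calling btrfs_next_leaf()
 * whenever the current leaf is exhausted.  The helper name
 * example_walk_objectid is hypothetical.
 */
static int example_walk_objectid(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the smallest possible key for this objectid */
	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != objectid)
			break;

		/* process the item at path->slots[0] here */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}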
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
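
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * of btrfs_previous_item().  Search for a key just past anything that
 * can exist for the objectid/type, then step backwards to the last
 * matching item.  The helper name example_find_last_of_type is
 * hypothetical.
 */
static int example_find_last_of_type(struct btrfs_root *root, u64 objectid,
				     int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* offset (u64)-1 sorts after every real item of this type */
	key.objectid = objectid;
	key.type = (u8)type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* walk backwards to the previous item of the wanted type */
	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		/* key now describes the found item */
	}
out:
	btrfs_free_path(path);
	return ret;
}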