/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *ins_key,
                      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct extent_buffer *dst,
                          struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
        struct btrfs_path *path;
        path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
        if (path)
                path->reada = 1;
        return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
        int i;
        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                if (p->nodes[i] && p->locks[i])
                        btrfs_set_lock_blocking(p->nodes[i]);
        }
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                                        struct extent_buffer *held)
{
        int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* lockdep really cares that we take all of these spinlocks
         * in the right order.  If any of the locks in the path are not
         * currently blocking, it is going to complain.  So, make really
         * really sure by forcing the path to blocking before we clear
         * the path blocking.
         */
        if (held)
                btrfs_set_lock_blocking(held);
        btrfs_set_path_blocking(p);
#endif

        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (p->nodes[i] && p->locks[i])
                        btrfs_clear_lock_blocking(p->nodes[i]);
        }

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
                btrfs_clear_lock_blocking(held);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
        btrfs_release_path(NULL, p);
        kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                p->slots[i] = 0;
                if (!p->nodes[i])
                        continue;
                if (p->locks[i]) {
                        btrfs_tree_unlock(p->nodes[i]);
                        p->locks[i] = 0;
                }
                free_extent_buffer(p->nodes[i]);
                p->nodes[i] = NULL;
        }
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        spin_lock(&root->node_lock);
        eb = root->node;
        extent_buffer_get(eb);
        spin_unlock(&root->node_lock);
        return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                btrfs_tree_lock(eb);

                spin_lock(&root->node_lock);
                if (eb == root->node) {
                        spin_unlock(&root->node_lock);
                        break;
                }
                spin_unlock(&root->node_lock);

                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}

/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
        if (root->track_dirty && list_empty(&root->dirty_list)) {
                list_add(&root->dirty_list,
                         &root->fs_info->dirty_cowonly_roots);
        }
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      struct extent_buffer *buf,
                      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
        struct extent_buffer *cow;
        int ret = 0;
        int level;
        u32 nritems;
        struct btrfs_disk_key disk_key;

        WARN_ON(root->ref_cows && trans->transid !=
                root->fs_info->running_transaction->transid);
        WARN_ON(root->ref_cows && trans->transid != root->last_trans);

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);
        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
                                     buf->start, 0);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        copy_extent_buffer(cow, buf, 0, 0, cow->len);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, new_root_objectid);

        write_extent_buffer(cow, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(cow),
                            BTRFS_FSID_SIZE);

        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                ret = btrfs_inc_ref(trans, root, cow, 1);
        else
                ret = btrfs_inc_ref(trans, root, cow, 0);
        if (ret)
                return ret;

        btrfs_mark_buffer_dirty(cow);
        *cow_ret = cow;
        return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
                              struct extent_buffer *buf)
{
        /*
         * Tree blocks not in reference counted trees and tree roots
         * are never shared. If a block was allocated after the last
         * snapshot and the block was not allocated by tree relocation,
         * we know the block is not shared.
         */
        if (root->ref_cows &&
            buf != root->node && buf != root->commit_root &&
            (btrfs_header_generation(buf) <=
             btrfs_root_last_snapshot(&root->root_item) ||
             btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
                return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (root->ref_cows &&
            btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                return 1;
#endif
        return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct extent_buffer *buf,
                                       struct extent_buffer *cow)
{
        u64 refs;
        u64 owner;
        u64 flags;
        u64 new_flags = 0;
        int ret;

        /*
         * Backrefs update rules:
         *
         * Always use full backrefs for extent pointers in tree block
         * allocated by tree relocation.
         *
         * If a shared tree block is no longer referenced by its owner
         * tree (btrfs_header_owner(buf) == root->root_key.objectid),
         * use full backrefs for extent pointers in tree block.
         *
         * If a tree block is being relocated
         * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
         * use full backrefs for extent pointers in tree block.
         * The reason for this is some operations (such as drop tree)
         * are only allowed for blocks that use full backrefs.
         */

        if (btrfs_block_can_be_shared(root, buf)) {
                ret = btrfs_lookup_extent_info(trans, root, buf->start,
                                               buf->len, &refs, &flags);
                BUG_ON(ret);
                BUG_ON(refs == 0);
        } else {
                refs = 1;
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
                else
                        flags = 0;
        }

        owner = btrfs_header_owner(buf);
        BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
               !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

        if (refs > 1) {
                if ((owner == root->root_key.objectid ||
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
                        BUG_ON(ret);

                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
                                BUG_ON(ret);
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                                BUG_ON(ret);
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
                } else {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        BUG_ON(ret);
                }
                if (new_flags != 0) {
                        ret = btrfs_set_disk_extent_flags(trans, root,
                                                          buf->start,
                                                          buf->len,
                                                          new_flags, 0);
                        BUG_ON(ret);
                }
        } else {
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        BUG_ON(ret);
                        ret = btrfs_dec_ref(trans, root, buf, 1);
                        BUG_ON(ret);
                }
                clean_tree_block(trans, root, buf);
        }
        return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct extent_buffer *buf,
                             struct extent_buffer *parent, int parent_slot,
                             struct extent_buffer **cow_ret,
                             u64 search_start, u64 empty_size)
{
        struct btrfs_disk_key disk_key;
        struct extent_buffer *cow;
        int level;
        int unlock_orig = 0;
        u64 parent_start;

        if (*cow_ret == buf)
                unlock_orig = 1;

        btrfs_assert_tree_locked(buf);

        WARN_ON(root->ref_cows && trans->transid !=
                root->fs_info->running_transaction->transid);
        WARN_ON(root->ref_cows && trans->transid != root->last_trans);

        level = btrfs_header_level(buf);

        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent)
                        parent_start = parent->start;
                else
                        parent_start = 0;
        } else
                parent_start = 0;

        cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
                                     root->root_key.objectid, &disk_key,
                                     level, search_start, empty_size);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        /* cow is set to blocking by btrfs_init_new_buffer */

        copy_extent_buffer(cow, buf, 0, 0, cow->len);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, root->root_key.objectid);

        write_extent_buffer(cow, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(cow),
                            BTRFS_FSID_SIZE);

        update_ref_for_cow(trans, root, buf, cow);

        if (buf == root->node) {
                WARN_ON(parent && parent != buf);
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        parent_start = buf->start;
                else
                        parent_start = 0;

                spin_lock(&root->node_lock);
                root->node = cow;
                extent_buffer_get(cow);
                spin_unlock(&root->node_lock);

                btrfs_free_extent(trans, root, buf->start, buf->len,
                                  parent_start, root->root_key.objectid,
                                  level, 0);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
                if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
                        parent_start = parent->start;
                else
                        parent_start = 0;

                WARN_ON(trans->transid != btrfs_header_generation(parent));
                btrfs_set_node_blockptr(parent, parent_slot,
                                        cow->start);
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                btrfs_free_extent(trans, root, buf->start, buf->len,
                                  parent_start, root->root_key.objectid,
                                  level, 0);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
        free_extent_buffer(buf);
        btrfs_mark_buffer_dirty(cow);
        *cow_ret = cow;
        return 0;
}

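/*
 * should_cow_block() below returns zero when the block can be modified
 * in place: it was already cowed in this transaction (its header
 * generation matches trans->transid), it has not been written out yet,
 * and it is not a RELOC-flagged block being touched through a
 * non-relocation root.  Any other state forces a fresh copy-on-write.
 */
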
static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
{
        if (btrfs_header_generation(buf) == trans->transid &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
            !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
              btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
                return 0;
        return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, struct extent_buffer *buf,
                    struct extent_buffer *parent, int parent_slot,
                    struct extent_buffer **cow_ret)
{
        u64 search_start;
        int ret;

        if (trans->transaction != root->fs_info->running_transaction) {
                printk(KERN_CRIT "trans %llu running %llu\n",
                       (unsigned long long)trans->transid,
                       (unsigned long long)
                       root->fs_info->running_transaction->transid);
                WARN_ON(1);
        }
        if (trans->transid != root->fs_info->generation) {
                printk(KERN_CRIT "trans %llu running %llu\n",
                       (unsigned long long)trans->transid,
                       (unsigned long long)root->fs_info->generation);
                WARN_ON(1);
        }

        if (!should_cow_block(trans, root, buf)) {
                *cow_ret = buf;
                return 0;
        }

        search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

        if (parent)
                btrfs_set_lock_blocking(parent);
        btrfs_set_lock_blocking(buf);

        ret = __btrfs_cow_block(trans, root, buf, parent,
                                 parent_slot, cow_ret, search_start, 0);
        return ret;
}

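/*
 * The search_start hint above rounds buf->start down to a 1GB boundary:
 * ~((u64)(1024 * 1024 * 1024) - 1) clears the low 30 bits, so the
 * replacement block is preferentially allocated from the same
 * gigabyte-sized region as the block being replaced.
 */
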
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
        if (blocknr < other && other - (blocknr + blocksize) < 32768)
                return 1;
        if (blocknr > other && blocknr - (other + blocksize) < 32768)
                return 1;
        return 0;
}

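/*
 * Example: with a 4K blocksize, blocks at 8192 and 20480 are "close"
 * because 20480 - (8192 + 4096) = 8192 < 32768.  Blocks separated by
 * 32K or more of slack are considered far enough apart to be worth
 * relocating.
 */
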
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
        struct btrfs_key k1;

        btrfs_disk_key_to_cpu(&k1, disk);

        if (k1.objectid > k2->objectid)
                return 1;
        if (k1.objectid < k2->objectid)
                return -1;
        if (k1.type > k2->type)
                return 1;
        if (k1.type < k2->type)
                return -1;
        if (k1.offset > k2->offset)
                return 1;
        if (k1.offset < k2->offset)
                return -1;
        return 0;
}

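/*
 * Keys compare by objectid first, then type, then offset, mirroring a
 * memcmp of the on-disk big endian key.  For example (objectid 257,
 * type 1, offset 4096) sorts before (objectid 257, type 2, offset 0)
 * no matter what the offsets are.
 */
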
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
        if (k1->objectid > k2->objectid)
                return 1;
        if (k1->objectid < k2->objectid)
                return -1;
        if (k1->type > k2->type)
                return 1;
        if (k1->type < k2->type)
                return -1;
        if (k1->offset > k2->offset)
                return 1;
        if (k1->offset < k2->offset)
                return -1;
        return 0;
}

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
                       int start_slot, int cache_only, u64 *last_ret,
                       struct btrfs_key *progress)
{
        struct extent_buffer *cur;
        u64 blocknr;
        u64 gen;
        u64 search_start = *last_ret;
        u64 last_block = 0;
        u64 other;
        u32 parent_nritems;
        int end_slot;
        int i;
        int err = 0;
        int parent_level;
        int uptodate;
        u32 blocksize;
        int progress_passed = 0;
        struct btrfs_disk_key disk_key;

        parent_level = btrfs_header_level(parent);
        if (cache_only && parent_level != 1)
                return 0;

        if (trans->transaction != root->fs_info->running_transaction)
                WARN_ON(1);
        if (trans->transid != root->fs_info->generation)
                WARN_ON(1);

        parent_nritems = btrfs_header_nritems(parent);
        blocksize = btrfs_level_size(root, parent_level - 1);
        end_slot = parent_nritems;

        if (parent_nritems == 1)
                return 0;

        btrfs_set_lock_blocking(parent);

        for (i = start_slot; i < end_slot; i++) {
                int close = 1;

                if (!parent->map_token) {
                        map_extent_buffer(parent,
                                        btrfs_node_key_ptr_offset(i),
                                        sizeof(struct btrfs_key_ptr),
                                        &parent->map_token, &parent->kaddr,
                                        &parent->map_start, &parent->map_len,
                                        KM_USER1);
                }
                btrfs_node_key(parent, &disk_key, i);
                if (!progress_passed && comp_keys(&disk_key, progress) < 0)
                        continue;

                progress_passed = 1;
                blocknr = btrfs_node_blockptr(parent, i);
                gen = btrfs_node_ptr_generation(parent, i);
                if (last_block == 0)
                        last_block = blocknr;

                if (i > 0) {
                        other = btrfs_node_blockptr(parent, i - 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
                if (!close && i < end_slot - 2) {
                        other = btrfs_node_blockptr(parent, i + 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
                if (close) {
                        last_block = blocknr;
                        continue;
                }
                if (parent->map_token) {
                        unmap_extent_buffer(parent, parent->map_token,
                                            KM_USER1);
                        parent->map_token = NULL;
                }

                cur = btrfs_find_tree_block(root, blocknr, blocksize);
                if (cur)
                        uptodate = btrfs_buffer_uptodate(cur, gen);
                else
                        uptodate = 0;
                if (!cur || !uptodate) {
                        if (cache_only) {
                                free_extent_buffer(cur);
                                continue;
                        }
                        if (!cur) {
                                cur = read_tree_block(root, blocknr,
                                                         blocksize, gen);
                        } else if (!uptodate) {
                                btrfs_read_buffer(cur, gen);
                        }
                }
                if (search_start == 0)
                        search_start = last_block;

                btrfs_tree_lock(cur);
                btrfs_set_lock_blocking(cur);
                err = __btrfs_cow_block(trans, root, cur, parent, i,
                                        &cur, search_start,
                                        min(16 * blocksize,
                                            (end_slot - i) * blocksize));
                if (err) {
                        btrfs_tree_unlock(cur);
                        free_extent_buffer(cur);
                        break;
                }
                search_start = cur->start;
                last_block = cur->start;
                *last_ret = search_start;
                btrfs_tree_unlock(cur);
                free_extent_buffer(cur);
        }
        if (parent->map_token) {
                unmap_extent_buffer(parent, parent->map_token,
                                    KM_USER1);
                parent->map_token = NULL;
        }
        return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
                                         struct extent_buffer *leaf)
{
        u32 nr = btrfs_header_nritems(leaf);
        if (nr == 0)
                return BTRFS_LEAF_DATA_SIZE(root);
        return btrfs_item_offset_nr(leaf, nr - 1);
}

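/*
 * Leaf layout reminder: item headers grow forward from the front of
 * the leaf while item data grows backward from the end, so the free
 * space is the gap in the middle:
 *
 *      [header][item 0][item 1] ... free ... [data 1][data 0]
 *
 * leaf_data_end() above returns the offset where that backward-growing
 * data region currently begins.
 */
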
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
                      int level)
{
        struct extent_buffer *parent = NULL;
        struct extent_buffer *node = path->nodes[level];
        struct btrfs_disk_key parent_key;
        struct btrfs_disk_key node_key;
        int parent_slot;
        int slot;
        struct btrfs_key cpukey;
        u32 nritems = btrfs_header_nritems(node);

        if (path->nodes[level + 1])
                parent = path->nodes[level + 1];

        slot = path->slots[level];
        BUG_ON(nritems == 0);
        if (parent) {
                parent_slot = path->slots[level + 1];
                btrfs_node_key(parent, &parent_key, parent_slot);
                btrfs_node_key(node, &node_key, 0);
                BUG_ON(memcmp(&parent_key, &node_key,
                              sizeof(struct btrfs_disk_key)));
                BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
                       btrfs_header_bytenr(node));
        }
        BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
        if (slot != 0) {
                btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
                btrfs_node_key(node, &node_key, slot);
                BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
        }
        if (slot < nritems - 1) {
                btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
                btrfs_node_key(node, &node_key, slot);
                BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
        }
        return 0;
}

/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
                      int level)
{
        struct extent_buffer *leaf = path->nodes[level];
        struct extent_buffer *parent = NULL;
        int parent_slot;
        struct btrfs_key cpukey;
        struct btrfs_disk_key parent_key;
        struct btrfs_disk_key leaf_key;
        int slot = path->slots[0];

        u32 nritems = btrfs_header_nritems(leaf);

        if (path->nodes[level + 1])
                parent = path->nodes[level + 1];

        if (nritems == 0)
                return 0;

        if (parent) {
                parent_slot = path->slots[level + 1];
                btrfs_node_key(parent, &parent_key, parent_slot);
                btrfs_item_key(leaf, &leaf_key, 0);

                BUG_ON(memcmp(&parent_key, &leaf_key,
                       sizeof(struct btrfs_disk_key)));
                BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
                       btrfs_header_bytenr(leaf));
        }
        if (slot != 0 && slot < nritems - 1) {
                btrfs_item_key(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
                if (comp_keys(&leaf_key, &cpukey) <= 0) {
                        btrfs_print_leaf(root, leaf);
                        printk(KERN_CRIT "slot %d offset bad key\n", slot);
                        BUG_ON(1);
                }
                if (btrfs_item_offset_nr(leaf, slot - 1) !=
                       btrfs_item_end_nr(leaf, slot)) {
                        btrfs_print_leaf(root, leaf);
                        printk(KERN_CRIT "slot %d offset bad\n", slot);
                        BUG_ON(1);
                }
        }
        if (slot < nritems - 1) {
                btrfs_item_key(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
                BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        btrfs_print_leaf(root, leaf);
                        printk(KERN_CRIT "slot %d offset bad\n", slot);
                        BUG_ON(1);
                }
        }
        BUG_ON(btrfs_item_offset_nr(leaf, 0) +
               btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
        return 0;
}

static noinline int check_block(struct btrfs_root *root,
                                struct btrfs_path *path, int level)
{
        return 0;
        if (level == 0)
                return check_leaf(root, path, level);
        return check_node(root, path, level);
}

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
                                       unsigned long p,
                                       int item_size, struct btrfs_key *key,
                                       int max, int *slot)
{
        int low = 0;
        int high = max;
        int mid;
        int ret;
        struct btrfs_disk_key *tmp = NULL;
        struct btrfs_disk_key unaligned;
        unsigned long offset;
        char *map_token = NULL;
        char *kaddr = NULL;
        unsigned long map_start = 0;
        unsigned long map_len = 0;
        int err;

        while (low < high) {
                mid = (low + high) / 2;
                offset = p + mid * item_size;

                if (!map_token || offset < map_start ||
                    (offset + sizeof(struct btrfs_disk_key)) >
                    map_start + map_len) {
                        if (map_token) {
                                unmap_extent_buffer(eb, map_token, KM_USER0);
                                map_token = NULL;
                        }

                        err = map_private_extent_buffer(eb, offset,
                                                sizeof(struct btrfs_disk_key),
                                                &map_token, &kaddr,
                                                &map_start, &map_len, KM_USER0);

                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                        } else {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
                        }
                } else {
                        tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                }
                ret = comp_keys(tmp, key);

                if (ret < 0)
                        low = mid + 1;
                else if (ret > 0)
                        high = mid;
                else {
                        *slot = mid;
                        if (map_token)
                                unmap_extent_buffer(eb, map_token, KM_USER0);
                        return 0;
                }
        }
        *slot = low;
        if (map_token)
                unmap_extent_buffer(eb, map_token, KM_USER0);
        return 1;
}

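/*
 * generic_bin_search() returns 0 with *slot at the matching index when
 * the key is found, or 1 with *slot at the insertion point (possibly
 * equal to max) when it is not.  The map/unmap calls above only exist
 * so keys can be read through a kmap of the correct extent_buffer page.
 */
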
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                      int level, int *slot)
{
        if (level == 0) {
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
                                          sizeof(struct btrfs_item),
                                          key, btrfs_header_nritems(eb),
                                          slot);
        } else {
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
                                          sizeof(struct btrfs_key_ptr),
                                          key, btrfs_header_nritems(eb),
                                          slot);
        }
        return -1;
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                     int level, int *slot)
{
        return bin_search(eb, key, level, slot);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
                                   struct extent_buffer *parent, int slot)
{
        int level = btrfs_header_level(parent);
        if (slot < 0)
                return NULL;
        if (slot >= btrfs_header_nritems(parent))
                return NULL;

        BUG_ON(level == 0);

        return read_tree_block(root, btrfs_node_blockptr(parent, slot),
                       btrfs_level_size(root, level - 1),
                       btrfs_node_ptr_generation(parent, slot));
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         struct btrfs_path *path, int level)
{
        struct extent_buffer *right = NULL;
        struct extent_buffer *mid;
        struct extent_buffer *left = NULL;
        struct extent_buffer *parent = NULL;
        int ret = 0;
        int wret;
        int pslot;
        int orig_slot = path->slots[level];
        int err_on_enospc = 0;
        u64 orig_ptr;

        if (level == 0)
                return 0;

        mid = path->nodes[level];

        WARN_ON(!path->locks[level]);
        WARN_ON(btrfs_header_generation(mid) != trans->transid);

        orig_ptr = btrfs_node_blockptr(mid, orig_slot);

        if (level < BTRFS_MAX_LEVEL - 1)
                parent = path->nodes[level + 1];
        pslot = path->slots[level + 1];

        /*
         * deal with the case where there is only one pointer in the root
         * by promoting the node below to a root
         */
        if (!parent) {
                struct extent_buffer *child;

                if (btrfs_header_nritems(mid) != 1)
                        return 0;

                /* promote the child to a root */
                child = read_node_slot(root, mid, 0);
                BUG_ON(!child);
                btrfs_tree_lock(child);
                btrfs_set_lock_blocking(child);
                ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
                BUG_ON(ret);

                spin_lock(&root->node_lock);
                root->node = child;
                spin_unlock(&root->node_lock);

                add_root_to_dirty_list(root);
                btrfs_tree_unlock(child);

                path->locks[level] = 0;
                path->nodes[level] = NULL;
                clean_tree_block(trans, root, mid);
                btrfs_tree_unlock(mid);
                /* once for the path */
                free_extent_buffer(mid);
                ret = btrfs_free_extent(trans, root, mid->start, mid->len,
                                        0, root->root_key.objectid, level, 1);
                /* once for the root ptr */
                free_extent_buffer(mid);
                return ret;
        }
        if (btrfs_header_nritems(mid) >
            BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
                return 0;

        if (btrfs_header_nritems(mid) > 2)
                return 0;

        if (btrfs_header_nritems(mid) < 2)
                err_on_enospc = 1;

        left = read_node_slot(root, parent, pslot - 1);
        if (left) {
                btrfs_tree_lock(left);
                btrfs_set_lock_blocking(left);
                wret = btrfs_cow_block(trans, root, left,
                                       parent, pslot - 1, &left);
                if (wret) {
                        ret = wret;
                        goto enospc;
                }
        }
        right = read_node_slot(root, parent, pslot + 1);
        if (right) {
                btrfs_tree_lock(right);
                btrfs_set_lock_blocking(right);
                wret = btrfs_cow_block(trans, root, right,
                                       parent, pslot + 1, &right);
                if (wret) {
                        ret = wret;
                        goto enospc;
                }
        }

        /* first, try to make some room in the middle buffer */
        if (left) {
                orig_slot += btrfs_header_nritems(left);
                wret = push_node_left(trans, root, left, mid, 1);
                if (wret < 0)
                        ret = wret;
                if (btrfs_header_nritems(mid) < 2)
                        err_on_enospc = 1;
        }

        /*
         * then try to empty the right most buffer into the middle
         */
        if (right) {
                wret = push_node_left(trans, root, mid, right, 1);
                if (wret < 0 && wret != -ENOSPC)
                        ret = wret;
                if (btrfs_header_nritems(right) == 0) {
                        u64 bytenr = right->start;
                        u32 blocksize = right->len;

                        clean_tree_block(trans, root, right);
                        btrfs_tree_unlock(right);
                        free_extent_buffer(right);
                        right = NULL;
                        wret = del_ptr(trans, root, path, level + 1, pslot +
                                       1);
                        if (wret)
                                ret = wret;
                        wret = btrfs_free_extent(trans, root, bytenr,
                                                 blocksize, 0,
                                                 root->root_key.objectid,
                                                 level, 0);
                        if (wret)
                                ret = wret;
                } else {
                        struct btrfs_disk_key right_key;
                        btrfs_node_key(right, &right_key, 0);
                        btrfs_set_node_key(parent, &right_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);
                }
        }
        if (btrfs_header_nritems(mid) == 1) {
                /*
                 * we're not allowed to leave a node with one item in the
                 * tree during a delete.  A deletion from lower in the tree
                 * could try to delete the only pointer in this node.
                 * So, pull some keys from the left.
                 * There has to be a left pointer at this point because
                 * otherwise we would have pulled some pointers from the
                 * right
                 */
                BUG_ON(!left);
                wret = balance_node_right(trans, root, mid, left);
                if (wret < 0) {
                        ret = wret;
                        goto enospc;
                }
                if (wret == 1) {
                        wret = push_node_left(trans, root, left, mid, 1);
                        if (wret < 0)
                                ret = wret;
                }
                BUG_ON(wret == 1);
        }
        if (btrfs_header_nritems(mid) == 0) {
                /* we've managed to empty the middle node, drop it */
                u64 bytenr = mid->start;
                u32 blocksize = mid->len;

                clean_tree_block(trans, root, mid);
                btrfs_tree_unlock(mid);
                free_extent_buffer(mid);
                mid = NULL;
                wret = del_ptr(trans, root, path, level + 1, pslot);
                if (wret)
                        ret = wret;
                wret = btrfs_free_extent(trans, root, bytenr, blocksize,
                                         0, root->root_key.objectid,
                                         level, 0);
                if (wret)
                        ret = wret;
        } else {
                /* update the parent key to reflect our changes */
                struct btrfs_disk_key mid_key;
                btrfs_node_key(mid, &mid_key, 0);
                btrfs_set_node_key(parent, &mid_key, pslot);
                btrfs_mark_buffer_dirty(parent);
        }

        /* update the path */
        if (left) {
                if (btrfs_header_nritems(left) > orig_slot) {
                        extent_buffer_get(left);
                        /* left was locked after cow */
                        path->nodes[level] = left;
                        path->slots[level + 1] -= 1;
                        path->slots[level] = orig_slot;
                        if (mid) {
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        }
                } else {
                        orig_slot -= btrfs_header_nritems(left);
                        path->slots[level] = orig_slot;
                }
        }
        /* double check we haven't messed things up */
        check_block(root, path, level);
        if (orig_ptr !=
            btrfs_node_blockptr(path->nodes[level], path->slots[level]))
                BUG();
enospc:
        if (right) {
                btrfs_tree_unlock(right);
                free_extent_buffer(right);
        }
        if (left) {
                if (path->nodes[level] != left)
                        btrfs_tree_unlock(left);
                free_extent_buffer(left);
        }
        return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, int level)
{
        struct extent_buffer *right = NULL;
        struct extent_buffer *mid;
        struct extent_buffer *left = NULL;
        struct extent_buffer *parent = NULL;
        int ret = 0;
        int wret;
        int pslot;
        int orig_slot = path->slots[level];
        u64 orig_ptr;

        if (level == 0)
                return 1;

        mid = path->nodes[level];
        WARN_ON(btrfs_header_generation(mid) != trans->transid);
        orig_ptr = btrfs_node_blockptr(mid, orig_slot);

        if (level < BTRFS_MAX_LEVEL - 1)
                parent = path->nodes[level + 1];
        pslot = path->slots[level + 1];

        if (!parent)
                return 1;

        left = read_node_slot(root, parent, pslot - 1);

        /* first, try to make some room in the middle buffer */
        if (left) {
                u32 left_nr;

                btrfs_tree_lock(left);
                btrfs_set_lock_blocking(left);

                left_nr = btrfs_header_nritems(left);
                if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
                        wret = 1;
                } else {
                        ret = btrfs_cow_block(trans, root, left, parent,
                                              pslot - 1, &left);
                        if (ret)
                                wret = 1;
                        else {
                                wret = push_node_left(trans, root,
                                                      left, mid, 0);
                        }
                }
                if (wret < 0)
                        ret = wret;
                if (wret == 0) {
                        struct btrfs_disk_key disk_key;
                        orig_slot += left_nr;
                        btrfs_node_key(mid, &disk_key, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot);
                        btrfs_mark_buffer_dirty(parent);
                        if (btrfs_header_nritems(left) > orig_slot) {
                                path->nodes[level] = left;
                                path->slots[level + 1] -= 1;
                                path->slots[level] = orig_slot;
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        } else {
                                orig_slot -=
                                        btrfs_header_nritems(left);
                                path->slots[level] = orig_slot;
                                btrfs_tree_unlock(left);
                                free_extent_buffer(left);
                        }
                        return 0;
                }
                btrfs_tree_unlock(left);
                free_extent_buffer(left);
        }
        right = read_node_slot(root, parent, pslot + 1);

        /*
         * then try to empty the right most buffer into the middle
         */
        if (right) {
                u32 right_nr;

                btrfs_tree_lock(right);
                btrfs_set_lock_blocking(right);

                right_nr = btrfs_header_nritems(right);
                if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
                        wret = 1;
                } else {
                        ret = btrfs_cow_block(trans, root, right,
                                              parent, pslot + 1,
                                              &right);
                        if (ret)
                                wret = 1;
                        else {
                                wret = balance_node_right(trans, root,
                                                          right, mid);
                        }
                }
                if (wret < 0)
                        ret = wret;
                if (wret == 0) {
                        struct btrfs_disk_key disk_key;

                        btrfs_node_key(right, &disk_key, 0);
                        btrfs_set_node_key(parent, &disk_key, pslot + 1);
                        btrfs_mark_buffer_dirty(parent);

                        if (btrfs_header_nritems(mid) <= orig_slot) {
                                path->nodes[level] = right;
                                path->slots[level + 1] += 1;
                                path->slots[level] = orig_slot -
                                        btrfs_header_nritems(mid);
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        } else {
                                btrfs_tree_unlock(right);
                                free_extent_buffer(right);
                        }
                        return 0;
                }
                btrfs_tree_unlock(right);
                free_extent_buffer(right);
        }
        return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
                             struct btrfs_path *path,
                             int level, int slot, u64 objectid)
{
        struct extent_buffer *node;
        struct btrfs_disk_key disk_key;
        u32 nritems;
        u64 search;
        u64 target;
        u64 nread = 0;
        int direction = path->reada;
        struct extent_buffer *eb;
        u32 nr;
        u32 blocksize;
        u32 nscan = 0;

        if (level != 1)
                return;

        if (!path->nodes[level])
                return;

        node = path->nodes[level];

        search = btrfs_node_blockptr(node, slot);
        blocksize = btrfs_level_size(root, level - 1);
        eb = btrfs_find_tree_block(root, search, blocksize);
        if (eb) {
                free_extent_buffer(eb);
                return;
        }

        target = search;

        nritems = btrfs_header_nritems(node);
        nr = slot;
        while (1) {
                if (direction < 0) {
                        if (nr == 0)
                                break;
                        nr--;
                } else if (direction > 0) {
                        nr++;
                        if (nr >= nritems)
                                break;
                }
                if (path->reada < 0 && objectid) {
                        btrfs_node_key(node, &disk_key, nr);
                        if (btrfs_disk_key_objectid(&disk_key) != objectid)
                                break;
                }
                search = btrfs_node_blockptr(node, nr);
                if ((search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
                        readahead_tree_block(root, search, blocksize,
                                     btrfs_node_ptr_generation(node, nr));
                        nread += blocksize;
                }
                nscan++;
                if ((nread > 65536 || nscan > 32))
                        break;
        }
}

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
                                      struct btrfs_path *path, int level)
{
        int slot;
        int nritems;
        struct extent_buffer *parent;
        struct extent_buffer *eb;
        u64 gen;
        u64 block1 = 0;
        u64 block2 = 0;
        int ret = 0;
        int blocksize;

        parent = path->nodes[level + 1];
        if (!parent)
                return 0;

        nritems = btrfs_header_nritems(parent);
        slot = path->slots[level + 1];
        blocksize = btrfs_level_size(root, level);

        if (slot > 0) {
                block1 = btrfs_node_blockptr(parent, slot - 1);
                gen = btrfs_node_ptr_generation(parent, slot - 1);
                eb = btrfs_find_tree_block(root, block1, blocksize);
                if (eb && btrfs_buffer_uptodate(eb, gen))
                        block1 = 0;
                free_extent_buffer(eb);
        }
        if (slot + 1 < nritems) {
                block2 = btrfs_node_blockptr(parent, slot + 1);
                gen = btrfs_node_ptr_generation(parent, slot + 1);
                eb = btrfs_find_tree_block(root, block2, blocksize);
                if (eb && btrfs_buffer_uptodate(eb, gen))
                        block2 = 0;
                free_extent_buffer(eb);
        }
        if (block1 || block2) {
                ret = -EAGAIN;

                /* release the whole path */
                btrfs_release_path(root, path);

                /* read the blocks */
                if (block1)
                        readahead_tree_block(root, block1, blocksize, 0);
                if (block2)
                        readahead_tree_block(root, block2, blocksize, 0);

                if (block1) {
                        eb = read_tree_block(root, block1, blocksize, 0);
                        free_extent_buffer(eb);
                }
                if (block2) {
                        eb = read_tree_block(root, block2, blocksize, 0);
                        free_extent_buffer(eb);
                }
        }
        return ret;
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
                               int lowest_unlock)
{
        int i;
        int skip_level = level;
        int no_skips = 0;
        struct extent_buffer *t;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        break;
                if (!path->locks[i])
                        break;
                if (!no_skips && path->slots[i] == 0) {
                        skip_level = i + 1;
                        continue;
                }
                if (!no_skips && path->keep_locks) {
                        u32 nritems;
                        t = path->nodes[i];
                        nritems = btrfs_header_nritems(t);
                        if (nritems < 1 || path->slots[i] >= nritems - 1) {
                                skip_level = i + 1;
                                continue;
                        }
                }
                if (skip_level < i && i >= lowest_unlock)
                        no_skips = 1;

                t = path->nodes[i];
                if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
                        btrfs_tree_unlock(t);
                        path->locks[i] = 0;
                }
        }
}

/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
        int i;

        if (path->keep_locks)
                return;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        continue;
                if (!path->locks[i])
                        continue;
                btrfs_tree_unlock(path->nodes[i]);
                path->locks[i] = 0;
        }
}

/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *p,
                       struct extent_buffer **eb_ret, int level, int slot,
                       struct btrfs_key *key)
{
        u64 blocknr;
        u64 gen;
        u32 blocksize;
        struct extent_buffer *b = *eb_ret;
        struct extent_buffer *tmp;
        int ret;

        blocknr = btrfs_node_blockptr(b, slot);
        gen = btrfs_node_ptr_generation(b, slot);
        blocksize = btrfs_level_size(root, level - 1);

        tmp = btrfs_find_tree_block(root, blocknr, blocksize);
        if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
                /*
                 * we found an up to date block without sleeping, return
                 * right away
                 */
                *eb_ret = tmp;
                return 0;
        }

        /*
         * reduce lock contention at high levels
         * of the btree by dropping locks before
         * we read.  Don't release the lock on the current
         * level because we need to walk this node to figure
         * out which blocks to read.
         */
        btrfs_unlock_up_safe(p, level + 1);
        btrfs_set_path_blocking(p);

        if (tmp)
                free_extent_buffer(tmp);
        if (p->reada)
                reada_for_search(root, p, level, slot, key->objectid);

        btrfs_release_path(NULL, p);

        ret = -EAGAIN;
        tmp = read_tree_block(root, blocknr, blocksize, gen);
        if (tmp) {
                /*
                 * If the read above didn't mark this buffer up to date,
                 * it will never end up being up to date.  Set ret to EIO now
                 * and give up so that our caller doesn't loop forever
                 * on our EAGAINs.
                 */
                if (!btrfs_buffer_uptodate(tmp, 0))
                        ret = -EIO;
                free_extent_buffer(tmp);
        }
        return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct btrfs_path *p,
                       struct extent_buffer *b, int level, int ins_len)
{
        int ret;
        if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
            BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
                int sret;

                sret = reada_for_balance(root, p, level);
                if (sret)
                        goto again;

                btrfs_set_path_blocking(p);
                sret = split_node(trans, root, p, level);
                btrfs_clear_path_blocking(p, NULL);

                BUG_ON(sret > 0);
                if (sret) {
                        ret = sret;
                        goto done;
                }
                b = p->nodes[level];
        } else if (ins_len < 0 && btrfs_header_nritems(b) <
                   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
                int sret;

                sret = reada_for_balance(root, p, level);
                if (sret)
                        goto again;

                btrfs_set_path_blocking(p);
                sret = balance_level(trans, root, p, level);
                btrfs_clear_path_blocking(p, NULL);

                if (sret) {
                        ret = sret;
                        goto done;
                }
                b = p->nodes[level];
                if (!b) {
                        btrfs_release_path(NULL, p);
                        goto again;
                }
                BUG_ON(btrfs_header_nritems(b) == 1);
        }
        return 0;

again:
        ret = -EAGAIN;
done:
        return ret;
}

/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, struct btrfs_path *p, int
                      ins_len, int cow)
{
        struct extent_buffer *b;
        int slot;
        int ret;
        int level;
        int lowest_unlock = 1;
        u8 lowest_level = 0;

        lowest_level = p->lowest_level;
        WARN_ON(lowest_level && ins_len > 0);
        WARN_ON(p->nodes[0] != NULL);

        if (ins_len < 0)
                lowest_unlock = 2;

again:
        if (p->search_commit_root) {
                b = root->commit_root;
                extent_buffer_get(b);
                if (!p->skip_locking)
                        btrfs_tree_lock(b);
        } else {
                if (p->skip_locking)
                        b = btrfs_root_node(root);
                else
                        b = btrfs_lock_root_node(root);
        }

        while (b) {
                level = btrfs_header_level(b);

                /*
                 * setup the path here so we can release it under lock
                 * contention with the cow code
                 */
                p->nodes[level] = b;
                if (!p->skip_locking)
                        p->locks[level] = 1;

                if (cow) {
                        int wret;

                        /*
                         * if we don't really need to cow this block
                         * then we don't want to set the path blocking,
                         * so we test it here
                         */
                        if (!should_cow_block(trans, root, b))
                                goto cow_done;

                        btrfs_set_path_blocking(p);

                        wret = btrfs_cow_block(trans, root, b,
                                               p->nodes[level + 1],
                                               p->slots[level + 1], &b);
                        if (wret) {
                                free_extent_buffer(b);
                                ret = wret;
                                goto done;
                        }
                }
cow_done:
                BUG_ON(!cow && ins_len);
                if (level != btrfs_header_level(b))
                        WARN_ON(1);
                level = btrfs_header_level(b);

                p->nodes[level] = b;
                if (!p->skip_locking)
                        p->locks[level] = 1;

                btrfs_clear_path_blocking(p, NULL);

                /*
                 * we have a lock on b and as long as we aren't changing
                 * the tree, there is no way for the items in b to change.
                 * It is safe to drop the lock on our parent before we
                 * go through the expensive btree search on b.
                 *
                 * If cow is true, then we might be changing slot zero,
                 * which may require changing the parent.  So, we can't
                 * drop the lock until after we know which slot we're
                 * operating on.
                 */
                if (!cow)
                        btrfs_unlock_up_safe(p, level + 1);

                ret = check_block(root, p, level);
                if (ret) {
                        ret = -1;
                        goto done;
                }

                ret = bin_search(b, key, level, &slot);

                if (level != 0) {
                        if (ret && slot > 0)
                                slot -= 1;
                        p->slots[level] = slot;
                        ret = setup_nodes_for_search(trans, root, p, b, level,
                                                     ins_len);
                        if (ret == -EAGAIN)
                                goto again;
                        if (ret)
                                goto done;
                        b = p->nodes[level];
                        slot = p->slots[level];

                        unlock_up(p, level, lowest_unlock);

                        /* this is only true while dropping a snapshot */
                        if (level == lowest_level) {
                                ret = 0;
                                goto done;
                        }

                        ret = read_block_for_search(trans, root, p,
                                                    &b, level, slot, key);
                        if (ret == -EAGAIN)
                                goto again;
                        if (ret == -EIO)
                                goto done;

                        if (!p->skip_locking) {
                                int lret;

                                btrfs_clear_path_blocking(p, NULL);
                                lret = btrfs_try_spin_lock(b);

                                if (!lret) {
                                        btrfs_set_path_blocking(p);
                                        btrfs_tree_lock(b);
                                        btrfs_clear_path_blocking(p, b);
                                }
                        }
                } else {
                        p->slots[level] = slot;
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(root, b) < ins_len) {
                                int sret;

                                btrfs_set_path_blocking(p);
                                sret = split_leaf(trans, root, key,
                                                      p, ins_len, ret == 0);
                                btrfs_clear_path_blocking(p, NULL);

                                BUG_ON(sret > 0);
                                if (sret) {
                                        ret = sret;
                                        goto done;
                                }
                        }
                        if (!p->search_for_split)
                                unlock_up(p, level, lowest_unlock);
                        goto done;
                }
        }
        ret = 1;
done:
        /*
         * we don't really know what they plan on doing with the path
         * from here on, so for now just mark it as blocking
         */
        if (!p->leave_spinning)
                btrfs_set_path_blocking(p);
        if (ret < 0)
                btrfs_release_path(root, p);
        return ret;
}

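/*
 * A typical read-only lookup (a minimal sketch; error handling trimmed
 * and the key values below are made up for illustration):
 *
 *      struct btrfs_path *path = btrfs_alloc_path();
 *      struct btrfs_key key;
 *
 *      key.objectid = ino;
 *      key.type = BTRFS_INODE_ITEM_KEY;
 *      key.offset = 0;
 *      ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *      if (ret == 0)
 *              the item is at path->nodes[0], slot path->slots[0]
 *      btrfs_free_path(path);
 *
 * trans may be NULL when ins_len and cow are both zero; passing
 * ins_len > 0 with cow set prepares the path for an insertion instead.
 */
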
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct btrfs_path *path,
                          struct btrfs_disk_key *key, int level)
{
        int i;
        int ret = 0;
        struct extent_buffer *t;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                int tslot = path->slots[i];
                if (!path->nodes[i])
                        break;
                t = path->nodes[i];
                btrfs_set_node_key(t, key, tslot);
                btrfs_mark_buffer_dirty(path->nodes[i]);
                if (tslot != 0)
                        break;
        }
        return ret;
}

/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, struct btrfs_path *path,
                            struct btrfs_key *new_key)
{
        struct btrfs_disk_key disk_key;
        struct extent_buffer *eb;
        int slot;

        eb = path->nodes[0];
        slot = path->slots[0];
        if (slot > 0) {
                btrfs_item_key(eb, &disk_key, slot - 1);
                if (comp_keys(&disk_key, new_key) >= 0)
                        return -1;
        }
        if (slot < btrfs_header_nritems(eb) - 1) {
                btrfs_item_key(eb, &disk_key, slot + 1);
                if (comp_keys(&disk_key, new_key) <= 0)
                        return -1;
        }

        btrfs_cpu_key_to_disk(&disk_key, new_key);
        btrfs_set_item_key(eb, &disk_key, slot);
        btrfs_mark_buffer_dirty(eb);
        if (slot == 0)
                fixup_low_keys(trans, root, path, &disk_key, 1);
        return 0;
}

/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct extent_buffer *dst,
                          struct extent_buffer *src, int empty)
{
        int push_items = 0;
        int src_nritems;
        int dst_nritems;
        int ret = 0;

        src_nritems = btrfs_header_nritems(src);
        dst_nritems = btrfs_header_nritems(dst);
        push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
        WARN_ON(btrfs_header_generation(src) != trans->transid);
        WARN_ON(btrfs_header_generation(dst) != trans->transid);

        if (!empty && src_nritems <= 8)
                return 1;

        if (push_items <= 0)
                return 1;

        if (empty) {
                push_items = min(src_nritems, push_items);
                if (push_items < src_nritems) {
                        /* leave at least 8 pointers in the node if
                         * we aren't going to empty it
                         */
                        if (src_nritems - push_items < 8) {
                                if (push_items <= 8)
                                        return 1;
                                push_items -= 8;
                        }
                }
        } else
                push_items = min(src_nritems - 8, push_items);

        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(dst_nritems),
                           btrfs_node_key_ptr_offset(0),
                           push_items * sizeof(struct btrfs_key_ptr));

        if (push_items < src_nritems) {
                memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
                                      btrfs_node_key_ptr_offset(push_items),
                                      (src_nritems - push_items) *
                                      sizeof(struct btrfs_key_ptr));
        }
        btrfs_set_header_nritems(src, src_nritems - push_items);
        btrfs_set_header_nritems(dst, dst_nritems + push_items);
        btrfs_mark_buffer_dirty(src);
        btrfs_mark_buffer_dirty(dst);

        return ret;
}

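/*
 * Note that unless 'empty' is set, push_node_left() leaves at least 8
 * pointers behind in 'src' so the node stays useful; a nearly empty
 * source returns 1 (nothing worth moving) instead of being drained.
 */
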
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *dst,
                              struct extent_buffer *src)
{
        int push_items = 0;
        int max_push;
        int src_nritems;
        int dst_nritems;
        int ret = 0;

        WARN_ON(btrfs_header_generation(src) != trans->transid);
        WARN_ON(btrfs_header_generation(dst) != trans->transid);

        src_nritems = btrfs_header_nritems(src);
        dst_nritems = btrfs_header_nritems(dst);
        push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
        if (push_items <= 0)
                return 1;

        if (src_nritems < 4)
                return 1;

        max_push = src_nritems / 2 + 1;
        /* don't try to empty the node */
        if (max_push >= src_nritems)
                return 1;

        if (max_push < push_items)
                push_items = max_push;

        memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
                                      btrfs_node_key_ptr_offset(0),
                                      (dst_nritems) *
                                      sizeof(struct btrfs_key_ptr));

        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(src_nritems - push_items),
                           push_items * sizeof(struct btrfs_key_ptr));

        btrfs_set_header_nritems(src, src_nritems - push_items);
        btrfs_set_header_nritems(dst, dst_nritems + push_items);

        btrfs_mark_buffer_dirty(src);
        btrfs_mark_buffer_dirty(dst);

        return ret;
}

/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_path *path, int level)
{
        u64 lower_gen;
        struct extent_buffer *lower;
        struct extent_buffer *c;
        struct extent_buffer *old;
        struct btrfs_disk_key lower_key;

        BUG_ON(path->nodes[level]);
        BUG_ON(path->nodes[level-1] != root->node);

        lower = path->nodes[level-1];
        if (level == 1)
                btrfs_item_key(lower, &lower_key, 0);
        else
                btrfs_node_key(lower, &lower_key, 0);

        c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                   root->root_key.objectid, &lower_key,
                                   level, root->node->start, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);

        memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_nritems(c, 1);
        btrfs_set_header_level(c, level);
        btrfs_set_header_bytenr(c, c->start);
        btrfs_set_header_generation(c, trans->transid);
        btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(c, root->root_key.objectid);

        write_extent_buffer(c, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(c),
                            BTRFS_FSID_SIZE);

        write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
                            (unsigned long)btrfs_header_chunk_tree_uuid(c),
                            BTRFS_UUID_SIZE);

        btrfs_set_node_key(c, &lower_key, 0);
        btrfs_set_node_blockptr(c, 0, lower->start);
        lower_gen = btrfs_header_generation(lower);
        WARN_ON(lower_gen != trans->transid);

        btrfs_set_node_ptr_generation(c, 0, lower_gen);

        btrfs_mark_buffer_dirty(c);

        spin_lock(&root->node_lock);
        old = root->node;
        root->node = c;
        spin_unlock(&root->node_lock);

        /* the super has an extra ref to root->node */
        free_extent_buffer(old);

        add_root_to_dirty_list(root);
        extent_buffer_get(c);
        path->nodes[level] = c;
        path->locks[level] = 1;
        path->slots[level] = 0;
        return 0;
}

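/*
 * insert_new_root() is how the tree gains height: the old root becomes
 * the single child of the new node 'c', so every existing path from
 * the root still reaches the same leaves, just one level deeper.
 */
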
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, struct btrfs_disk_key
                      *key, u64 bytenr, int slot, int level)
{
        struct extent_buffer *lower;
        int nritems;

        BUG_ON(!path->nodes[level]);
        lower = path->nodes[level];
        nritems = btrfs_header_nritems(lower);
        BUG_ON(slot > nritems);
        if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
                BUG();
        if (slot != nritems) {
                memmove_extent_buffer(lower,
                              btrfs_node_key_ptr_offset(slot + 1),
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
        btrfs_set_node_key(lower, key, slot);
        btrfs_set_node_blockptr(lower, slot, bytenr);
        WARN_ON(trans->transid == 0);
        btrfs_set_node_ptr_generation(lower, slot, trans->transid);
        btrfs_set_header_nritems(lower, nritems + 1);
        btrfs_mark_buffer_dirty(lower);
        return 0;
}

/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_path *path, int level)
{
        struct extent_buffer *c;
        struct extent_buffer *split;
        struct btrfs_disk_key disk_key;
        int mid;
        int ret;
        int wret;
        u32 c_nritems;

        c = path->nodes[level];
        WARN_ON(btrfs_header_generation(c) != trans->transid);
        if (c == root->node) {
                /* trying to split the root, lets make a new one */
                ret = insert_new_root(trans, root, path, level + 1);
                if (ret)
                        return ret;
        } else {
                ret = push_nodes_for_insert(trans, root, path, level);
                c = path->nodes[level];
                if (!ret && btrfs_header_nritems(c) <
                    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
                        return 0;
                if (ret < 0)
                        return ret;
        }

        c_nritems = btrfs_header_nritems(c);
        mid = (c_nritems + 1) / 2;
        btrfs_node_key(c, &disk_key, mid);

        split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                        root->root_key.objectid,
                                        &disk_key, level, c->start, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);

        memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_level(split, btrfs_header_level(c));
        btrfs_set_header_bytenr(split, split->start);
        btrfs_set_header_generation(split, trans->transid);
        btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(split, root->root_key.objectid);
        write_extent_buffer(split, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(split),
                            BTRFS_FSID_SIZE);
        write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
                            (unsigned long)btrfs_header_chunk_tree_uuid(split),
                            BTRFS_UUID_SIZE);

        copy_extent_buffer(split, c,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(mid),
                           (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
        btrfs_set_header_nritems(split, c_nritems - mid);
        btrfs_set_header_nritems(c, mid);
        ret = 0;

        btrfs_mark_buffer_dirty(c);
        btrfs_mark_buffer_dirty(split);

        wret = insert_ptr(trans, root, path, &disk_key, split->start,
                          path->slots[level + 1] + 1,
                          level + 1);
        if (wret)
                ret = wret;

        if (path->slots[level] >= mid) {
                path->slots[level] -= mid;
                btrfs_tree_unlock(c);
                free_extent_buffer(c);
                path->nodes[level] = split;
                path->slots[level + 1] += 1;
        } else {
                btrfs_tree_unlock(split);
                free_extent_buffer(split);
        }
        return ret;
}

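/*
 * The split point is mid = (c_nritems + 1) / 2, e.g. a node holding
 * 121 pointers keeps the first 61 and moves the remaining 60 into
 * 'split'; path->slots[level] is then adjusted to follow whichever
 * half the original slot landed in.
 */
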
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
        int data_len;
        int nritems = btrfs_header_nritems(l);
        int end = min(nritems, start + nr) - 1;

        if (!nr)
                return 0;
        data_len = btrfs_item_end_nr(l, start);
        data_len = data_len - btrfs_item_offset_nr(l, end);
        data_len += sizeof(struct btrfs_item) * nr;
        WARN_ON(data_len < 0);
        return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
                                   struct extent_buffer *leaf)
{
        int nritems = btrfs_header_nritems(leaf);
        int ret;
        ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
        if (ret < 0) {
                printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
                       "used %d nritems %d\n",
                       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
                       leaf_space_used(leaf, 0, nritems), nritems);
        }
        return ret;
}

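/*
 * Rough example: leaf_space_used() charges sizeof(struct btrfs_item)
 * for each item header plus the bytes of item data, so on a leaf whose
 * headers and data together consume 3000 of BTRFS_LEAF_DATA_SIZE()
 * bytes, btrfs_leaf_free_space() reports what remains for one more
 * header plus its data.
 */
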
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      int data_size, int empty,
                                      struct extent_buffer *right,
                                      int free_space, u32 left_nritems)
{
        struct extent_buffer *left = path->nodes[0];
        struct extent_buffer *upper = path->nodes[1];
        struct btrfs_disk_key disk_key;
        int slot;
        u32 i;
        int push_space = 0;
        int push_items = 0;
        struct btrfs_item *item;
        u32 nr;
        u32 right_nritems;
        u32 data_end;
        u32 this_item_size;

        if (empty)
                nr = 0;
        else
                nr = 1;

        if (path->slots[0] >= left_nritems)
                push_space += data_size;

        slot = path->slots[1];
        i = left_nritems - 1;
        while (i >= nr) {
                item = btrfs_item_nr(left, i);

                if (!empty && push_items > 0) {
                        if (path->slots[0] > i)
                                break;
                        if (path->slots[0] == i) {
                                int space = btrfs_leaf_free_space(root, left);
                                if (space + push_space * 2 > free_space)
                                        break;
                        }
                }

                if (path->slots[0] == i)
                        push_space += data_size;

                if (!left->map_token) {
                        map_extent_buffer(left, (unsigned long)item,
                                        sizeof(struct btrfs_item),
                                        &left->map_token, &left->kaddr,
                                        &left->map_start, &left->map_len,
                                        KM_USER1);
                }

                this_item_size = btrfs_item_size(left, item);
                if (this_item_size + sizeof(*item) + push_space > free_space)
                        break;

                push_items++;
                push_space += this_item_size + sizeof(*item);
                if (i == 0)
                        break;
                i--;
        }
        if (left->map_token) {
                unmap_extent_buffer(left, left->map_token, KM_USER1);
                left->map_token = NULL;
        }

        if (push_items == 0)
                goto out_unlock;

        if (!empty && push_items == left_nritems)
                WARN_ON(1);

        /* push left to right */
        right_nritems = btrfs_header_nritems(right);

        push_space = btrfs_item_end_nr(left, left_nritems - push_items);
        push_space -= leaf_data_end(root, left);

        /* make room in the right data area */
        data_end = leaf_data_end(root, right);
        memmove_extent_buffer(right,
                              btrfs_leaf_data(right) + data_end - push_space,
                              btrfs_leaf_data(right) + data_end,
                              BTRFS_LEAF_DATA_SIZE(root) - data_end);

        /* copy from the left data area */
        copy_extent_buffer(right, left, btrfs_leaf_data(right) +
                     BTRFS_LEAF_DATA_SIZE(root) - push_space,
                     btrfs_leaf_data(left) + leaf_data_end(root, left),
                     push_space);

        memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
                              btrfs_item_nr_offset(0),
                              right_nritems * sizeof(struct btrfs_item));

        /* copy the items from left to right */
        copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
                   btrfs_item_nr_offset(left_nritems - push_items),
                   push_items * sizeof(struct btrfs_item));

        /* update the item pointers */
        right_nritems += push_items;
        btrfs_set_header_nritems(right, right_nritems);
        push_space = BTRFS_LEAF_DATA_SIZE(root);
        for (i = 0; i < right_nritems; i++) {
                item = btrfs_item_nr(right, i);
                if (!right->map_token) {
                        map_extent_buffer(right, (unsigned long)item,
                                        sizeof(struct btrfs_item),
                                        &right->map_token, &right->kaddr,
                                        &right->map_start, &right->map_len,
                                        KM_USER1);
                }
                push_space -= btrfs_item_size(right, item);
                btrfs_set_item_offset(right, item, push_space);
        }

        if (right->map_token) {
                unmap_extent_buffer(right, right->map_token, KM_USER1);
                right->map_token = NULL;
        }
        left_nritems -= push_items;
        btrfs_set_header_nritems(left, left_nritems);

        if (left_nritems)
                btrfs_mark_buffer_dirty(left);
        btrfs_mark_buffer_dirty(right);

        btrfs_item_key(right, &disk_key, 0);
        btrfs_set_node_key(upper, &disk_key, slot + 1);
        btrfs_mark_buffer_dirty(upper);

        /* then fixup the leaf pointer in the path */
        if (path->slots[0] >= left_nritems) {
                path->slots[0] -= left_nritems;
                if (btrfs_header_nritems(path->nodes[0]) == 0)
                        clean_tree_block(trans, root, path->nodes[0]);
                btrfs_tree_unlock(path->nodes[0]);
                free_extent_buffer(path->nodes[0]);
                path->nodes[0] = right;
                path->slots[1] += 1;
        } else {
                btrfs_tree_unlock(right);
                free_extent_buffer(right);
        }
        return 0;

out_unlock:
        btrfs_tree_unlock(right);
        free_extent_buffer(right);
        return 1;
}

2451 btrfs_tree_unlock(right
);
2452 free_extent_buffer(right
);
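
/*
 * Editor's sketch of the sizing loop above, with assumed round numbers:
 * struct btrfs_item is 25 bytes on disk, so with free_space = 500 in
 * the right leaf and left items of data sizes {300, 120, 60}, the scan
 * from the rightmost item accepts 60 + 25 and 120 + 25 (cumulative
 * 230) but stops at 300 + 25, so only the two rightmost items move.
 * If path->slots[0] points at an item that would move, data_size is
 * reserved as well so the pending insertion still fits after the push.
 */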
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path, int data_size,
			   int empty)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, data_size, empty,
				 right, free_space, left_nritems);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
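
/*
 * Illustrative caller pattern (editor's sketch, error handling elided):
 *
 *	wret = push_leaf_right(trans, root, path, data_size, 0);
 *	if (wret < 0)
 *		return wret;	(hard error)
 *	if (wret == 0)
 *		...		(the push freed enough room in the leaf)
 *	else
 *		...		(try push_leaf_left() or split_leaf())
 */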
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, int right_nritems)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int slot;
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	slot = path->slots[1];

	if (empty)
		nr = right_nritems;
	else
		nr = right_nritems - 1;

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int data_size,
			  int empty)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, data_size,
				empty, left, free_space, right_nritems);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct extent_buffer *l,
			       struct extent_buffer *right,
			       int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}
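
/*
 * Editor's note on the offset rebasing above: the copied item data is
 * packed against the tail of the new right leaf, so every copied
 * item's data offset grows by rt_data_off.  Assuming a data area of
 * 4096 bytes for round numbers, if btrfs_item_end_nr(l, mid) was 3000
 * then rt_data_off is 4096 - 3000 = 1096, and data that lived at
 * offset 2000 in the old leaf now lives at 2000 + 1096 = 3096.
 */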
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;

	/* first try to make some room by pushing left and right */
	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
		wret = push_leaf_right(trans, root, path, data_size, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size, 0);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right)) {
		BUG_ON(1);
		return PTR_ERR(right);
	}

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key, right->start,
					  path->slots[1] + 1, 1);
			if (wret)
				ret = wret;

			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key, right->start,
					  path->slots[1], 1);
			if (wret)
				ret = wret;
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0) {
				wret = fixup_low_keys(trans, root,
						      path, &disk_key, 1);
				if (wret)
					ret = wret;
			}
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
	BUG_ON(ret);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return ret;
}
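
/*
 * Editor's summary of the three cases computed above: split == 1 is
 * the normal case, the leaf is divided at mid and the new item goes
 * wherever its slot falls; split == 0 means the new item gets a fresh
 * empty leaf of its own (slot 0 or past the end) and nothing is
 * copied; split == 2 means one division still cannot make room, so
 * after copy_for_split() the code loops once more via 'again', with
 * num_doubles guarding against splitting more than twice.
 */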
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	u32 item_size;
	struct extent_buffer *leaf;
	struct btrfs_key orig_key;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int ret = 0;
	int slot;
	u32 nritems;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;
	char *buf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
	if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
		goto split;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_release_path(root, path);

	path->search_for_split = 1;
	path->keep_locks = 1;

	ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
	path->search_for_split = 0;

	/* if our item isn't there or got smaller, return now */
	if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
							path->slots[0])) {
		path->keep_locks = 0;
		return -EAGAIN;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &orig_key, path,
			 sizeof(struct btrfs_item), 1);
	path->keep_locks = 0;
	BUG_ON(ret);

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

split:
	/*
	 * make sure any changes to the path from split_leaf leave it
	 * in a blocking state
	 */
	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);
	slot = path->slots[0] + 1;
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);

	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	kfree(buf);
	return ret;
}
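
/*
 * Illustrative use (editor's sketch; the key values are hypothetical):
 * splitting a 100 byte item at offset 60 leaves the original item with
 * the first 60 bytes and creates a new 40 byte item in the next slot
 * carrying new_key:
 *
 *	struct btrfs_key new_key = old_key;
 *	new_key.offset = old_key.offset + 60;
 *	ret = btrfs_split_item(trans, root, path, &new_key, 60);
 */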
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
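
/*
 * Editor's note: when from_end is zero the bytes are chopped from the
 * front, so the surviving data stays put and the key offset is bumped
 * by size_diff instead, keeping the remaining bytes addressed by the
 * same (key.offset + position) pairs.  The special case for inline
 * file extents slides the struct header that sits in front of the
 * inline data forward before the generic move, so the header is not
 * truncated along with the data.
 */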
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
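
/*
 * Editor's note: extending shifts this item's data (and everything in
 * front of it in the data area) toward the leaf header by data_size
 * bytes, so the existing bytes stay at the start of the item and the
 * new, uninitialized bytes appear at its logical end; callers that
 * need the room elsewhere must memmove within the item afterwards.
 */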
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			/* cap nr to the items that actually fit in a leaf */
			nr = i;
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	BUG_ON(nr == 0);

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		/*
		 * drop trailing items until the rest fits.  Note: starting
		 * at nr - 1; the original started at nr, reading one slot
		 * past the end of data_size[].
		 */
		for (i = nr - 1; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing what's on the next leaf and we'd
		 * have to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
static noinline_for_stack int
setup_items_for_insert(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_key *cpu_key, u32 *data_size,
		       u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	int ret;
	struct extent_buffer *leaf;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	ret = 0;
	if (slot == 0) {
		struct btrfs_disk_key disk_key;
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	struct extent_buffer *leaf;
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	slot = path->slots[0];
	BUG_ON(slot < 0);

	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
				     total_data, total_size, nr);
out:
	return ret;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
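
/*
 * Illustrative use (editor's sketch; the item type and payload are
 * hypothetical, error handling elided):
 *
 *	struct btrfs_key key;
 *	u64 payload = 0;
 *
 *	key.objectid = objectid;
 *	key.type = SOME_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, &payload,
 *				sizeof(payload));
 */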
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}
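
/*
 * Editor's sketch of the deletion above: removing slot 1 from a node
 * with nritems == 4 moves key pointers 2..3 down over slot 1 with one
 * memmove of 2 * sizeof(struct btrfs_key_ptr), then drops nritems to
 * 3; deleting the last slot needs no move at all.
 */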
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *leaf)
{
	int ret;

	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	ret = del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret)
		return ret;

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
				0, root->root_key.objectid, 0, 0);
	return ret;
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			ret = btrfs_del_leaf(trans, root, path, leaf);
			BUG_ON(ret);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1, 1);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path, leaf);
				BUG_ON(ret);
				free_extent_buffer(leaf);
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
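
/*
 * Editor's note on the rebalancing above: a leaf that becomes entirely
 * empty is unlinked immediately, while one that drops below half of
 * BTRFS_LEAF_DATA_SIZE(root) is first offered to its left and then its
 * right neighbour; only if the pushes drain it completely is it
 * deleted, and the extra reference taken with extent_buffer_get()
 * keeps the buffer alive while the pushes rewrite the path.
 */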
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(root, path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = 1;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(root, path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);

		btrfs_tree_lock(cur);

		path->locks[level - 1] = 1;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path, NULL);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			int cache_only, u64 min_trans)
{
	int level = lowest_level;
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL)
				return 1;
			continue;
		}
		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
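
/*
 * Editor's note: the walk above climbs one level each time a node is
 * exhausted and retries from the parent's next slot, so the first key
 * it reports is the smallest key strictly to the right of the current
 * path position that also satisfies the cache_only and min_trans
 * filters.
 */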
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int force_blocking = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	/*
	 * we take the blocks in an order that upsets lockdep.  Using
	 * blocking mode is the only way around it.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	force_blocking = 1;
#endif

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(root, path);

	path->keep_locks = 1;

	if (!force_blocking)
		path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock(next);
			free_extent_buffer(next);
		}

		next = c;
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock(c);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = 1;

		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(root, path);
			goto done;
		}

		if (!path->skip_locking) {
			btrfs_assert_tree_locked(path->nodes[level]);
			ret = btrfs_try_spin_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_lock(next);
				if (!force_blocking)
					btrfs_clear_path_blocking(path, next);
			}
			if (force_blocking)
				btrfs_set_lock_blocking(next);
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
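
/*
 * Illustrative iteration pattern (editor's sketch, error handling
 * elided) for walking every item of a tree with btrfs_next_leaf():
 *
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;	(1: done, < 0: error)
 *			continue;
 *		}
 *		(process the item at path->slots[0])
 *		path->slots[0]++;
 *	}
 */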
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type == type)
			return 0;
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
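
/*
 * Illustrative use (editor's sketch): starting from a search that
 * landed just past (objectid, type), one call finds the closest
 * preceding item of the wanted type, e.g.
 *
 *	ret = btrfs_previous_item(root, path, objectid, SOME_ITEM_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key,
 *				      path->slots[0]);
 *
 * where SOME_ITEM_KEY stands in for a real item type.
 */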
)