/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;

	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	if (path)
		path->reada = 1;
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
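
/*
 * Editor's illustrative sketch (not part of the original file): the
 * usual lifecycle of a path.  Callers allocate a path, hand it to
 * btrfs_search_slot, and must free it when done; btrfs_free_path
 * releases any locks and extent buffer references still held.  The
 * trans/root/key values here are hypothetical placeholders.
 */
#if 0
static int example_path_lifecycle(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len 0 and cow 0: a read-only lookup */
	ret = btrfs_search_slot(trans, root, key, path, 0, 0);

	/* ... examine path->nodes[0] / path->slots[0] here ... */

	btrfs_free_path(path);
	return ret;
}
#endif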
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	spin_lock(&root->node_lock);
	eb = root->node;
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);

		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	u32 nritems;
	int ret = 0;
	int level;
	struct btrfs_root *new_root;

	new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
	if (!new_root)
		return -ENOMEM;

	memcpy(new_root, root, sizeof(*new_root));
	new_root->root_key.objectid = new_root_objectid;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
				     new_root_objectid, trans->transid,
				     level, buf->start, 0);
	if (IS_ERR(cow)) {
		kfree(new_root);
		return PTR_ERR(cow);
	}

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_owner(cow, new_root_objectid);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
	kfree(new_root);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	u64 parent_start;
	struct extent_buffer *cow;
	u32 nritems;
	int ret = 0;
	int level;
	int unlock_orig = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	if (parent)
		parent_start = parent->start;
	else
		parent_start = 0;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	cow = btrfs_alloc_free_block(trans, root, buf->len,
				     parent_start, root->root_key.objectid,
				     trans->transid, level,
				     search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_owner(cow, root->root_key.objectid);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (btrfs_header_generation(buf) != trans->transid) {
		u32 nr_extents;
		ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
		if (ret)
			return ret;

		ret = btrfs_cache_ref(trans, root, buf, nr_extents);
		WARN_ON(ret);
	} else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
		/*
		 * There are only two places that can drop reference to
		 * tree blocks owned by living reloc trees, one is here,
		 * the other place is btrfs_drop_subtree. In both places,
		 * we check reference count while tree block is locked.
		 * Furthermore, if reference count is one, it won't get
		 * increased by someone else.
		 */
		u32 refs;
		ret = btrfs_lookup_extent_ref(trans, root, buf->start,
					      buf->len, &refs);
		BUG_ON(ret);
		if (refs == 1) {
			ret = btrfs_update_ref(trans, root, buf, cow,
					       0, nritems);
			clean_tree_block(trans, root, buf);
		} else {
			ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
		}
		BUG_ON(ret);
	} else {
		ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
		if (ret)
			return ret;
		clean_tree_block(trans, root, buf);
	}

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
		WARN_ON(ret);
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);

		spin_lock(&root->node_lock);
		root->node = cow;
		extent_buffer_get(cow);
		spin_unlock(&root->node_lock);

		if (buf != root->commit_root) {
			btrfs_free_extent(trans, root, buf->start,
					  buf->len, buf->start,
					  root->root_key.objectid,
					  btrfs_header_generation(buf),
					  level, 1);
		}
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		WARN_ON(trans->transid == 0);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		WARN_ON(btrfs_header_generation(parent) != trans->transid);
		btrfs_free_extent(trans, root, buf->start, buf->len,
				  parent_start, btrfs_header_owner(parent),
				  btrfs_header_generation(parent), level, 1);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (btrfs_header_generation(buf) == trans->transid &&
	    btrfs_header_owner(buf) == root->root_key.objectid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);
	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
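
/*
 * Editor's note (not in the original file): a quick worked example of
 * close_blocks.  With a hypothetical 4K blocksize, blocknr = 100K and
 * other = 120K gives other - (blocknr + blocksize) = 120K - 104K = 16K,
 * which is under the 32K threshold, so the two blocks count as close.
 */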
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	if (k1.objectid > k2->objectid)
		return 1;
	if (k1.objectid < k2->objectid)
		return -1;
	if (k1.type > k2->type)
		return 1;
	if (k1.type < k2->type)
		return -1;
	if (k1.offset > k2->offset)
		return 1;
	if (k1.offset < k2->offset)
		return -1;
	return 0;
}
/*
 * same as comp_keys only with two btrfs_key's
 */
static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
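
/*
 * Editor's illustrative sketch (not part of the original file): keys
 * sort by objectid first, then type, then offset.  With hypothetical
 * values, (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_INODE_REF_KEY, 0) because the type breaks the objectid
 * tie, and anything with objectid 257 sorts after both.
 */
#if 0
static void example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256,
			       .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
	struct btrfs_key b = { .objectid = 256,
			       .type = BTRFS_INODE_REF_KEY, .offset = 0 };

	BUG_ON(comp_cpu_keys(&a, &b) >= 0);	/* a < b */
}
#endif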
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
						      blocksize, gen);
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
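
/*
 * Editor's note (not in the original file): the layout leaf_data_end
 * relies on looks like this -- struct btrfs_item headers grow forward
 * from the leaf header while their data grows backward from the end of
 * the block, and the two meet in the middle:
 *
 *   [leaf header][item 0][item 1]...   free   ...[data 1][data 0]
 *                 ------->                        <-------
 *
 * each item's offset field records where its data starts, so the data
 * of the last (highest numbered) item marks the end of the free area.
 */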
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}
/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];

	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}
static noinline int check_block(struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}
		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}
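
/*
 * Editor's illustrative sketch (not part of the original file): the
 * loop above is a plain binary search; the mapping logic only exists
 * because a key may straddle a page boundary inside an extent_buffer.
 * With that stripped away, the core is equivalent to this sketch over
 * a flat array:
 */
#if 0
static int example_bin_search(struct btrfs_disk_key *keys, int max,
			      struct btrfs_key *key, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;

	while (low < high) {
		mid = (low + high) / 2;
		ret = comp_keys(&keys[mid], key);
		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;	/* insertion point when the key isn't found */
	return 1;
}
#endif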
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	int err_on_enospc = 0;
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		BUG_ON(ret);

		spin_lock(&root->node_lock);
		root->node = child;
		spin_unlock(&root->node_lock);

		ret = btrfs_update_extent_ref(trans, root, child->start,
					      child->len,
					      mid->start, child->start,
					      root->root_key.objectid,
					      trans->transid, level - 1);
		BUG_ON(ret);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);
		ret = btrfs_free_extent(trans, root, mid->start, mid->len,
					mid->start, root->root_key.objectid,
					btrfs_header_generation(mid),
					level, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return ret;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	if (trans->transaction->delayed_refs.flushing &&
	    btrfs_header_nritems(mid) > 2)
		return 0;

	if (btrfs_header_nritems(mid) < 2)
		err_on_enospc = 1;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		if (btrfs_header_nritems(mid) < 2)
			err_on_enospc = 1;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			u64 bytenr = right->start;
			u64 generation = btrfs_header_generation(parent);
			u32 blocksize = right->len;

			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			free_extent_buffer(right);
			right = NULL;
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			wret = btrfs_free_extent(trans, root, bytenr,
						 blocksize, parent->start,
						 btrfs_header_owner(parent),
						 generation, level, 1);
			if (wret)
				ret = wret;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		/* we've managed to empty the middle node, drop it */
		u64 root_gen = btrfs_header_generation(parent);
		u64 bytenr = mid->start;
		u32 blocksize = mid->len;

		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		free_extent_buffer(mid);
		mid = NULL;
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		wret = btrfs_free_extent(trans, root, bytenr, blocksize,
					 parent->start,
					 btrfs_header_owner(parent),
					 root_gen, level, 1);
		if (wret)
			ret = wret;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	check_block(root, path, level);
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static noinline void reada_for_search(struct btrfs_root *root,
				      struct btrfs_path *path,
				      int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;
	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(root, search, blocksize,
				     btrfs_node_ptr_generation(node, nr));
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level - 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;
		btrfs_release_path(root, path);
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock(t);
			path->locks[i] = 0;
		}
	}
}
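
/*
 * Editor's note (not in the original file): a concrete reading of
 * lowest_unlock.  btrfs_search_slot calls unlock_up with
 * lowest_unlock == 1, so after descending, everything above the leaf
 * can be dropped but the leaf lock itself (level 0) is kept for the
 * caller.  Slot-0 positions stay locked too, since a key change in the
 * child must be propagated into the parent's key pointer.
 */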
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks || path->lowest_level)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	struct extent_buffer *tmp;
	int slot;
	int ret;
	int level;
	int should_reada = p->reada;
	int lowest_unlock = 1;
	int blocksize;
	u8 lowest_level = 0;
	u64 blocknr;
	u64 gen;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0)
		lowest_unlock = 2;

again:
	if (p->skip_locking)
		b = btrfs_root_node(root);
	else
		b = btrfs_lock_root_node(root);

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		if (cow) {
			int wret;

			/* is a cow on this block not required */
			if (btrfs_header_generation(b) == trans->transid &&
			    btrfs_header_owner(b) == root->root_key.objectid &&
			    !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
				goto cow_done;
			}
			btrfs_set_path_blocking(p);

			wret = btrfs_cow_block(trans, root, b,
					       p->nodes[level + 1],
					       p->slots[level + 1], &b);
			if (wret) {
				free_extent_buffer(b);
				ret = wret;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);
		if (level != btrfs_header_level(b))
			WARN_ON(1);
		level = btrfs_header_level(b);

		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		btrfs_clear_path_blocking(p, NULL);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = check_block(root, p, level);
		if (ret) {
			ret = -1;
			goto done;
		}

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			if (ret && slot > 0)
				slot -= 1;
			p->slots[level] = slot;
			if ((p->search_for_split || ins_len > 0) &&
			    btrfs_header_nritems(b) >=
			    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
				int sret;

				sret = reada_for_balance(root, p, level);
				if (sret)
					goto again;

				btrfs_set_path_blocking(p);
				sret = split_node(trans, root, p, level);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(sret > 0);
				if (sret) {
					ret = sret;
					goto done;
				}
				b = p->nodes[level];
				slot = p->slots[level];
			} else if (ins_len < 0 &&
				   btrfs_header_nritems(b) <
				   BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
				int sret;

				sret = reada_for_balance(root, p, level);
				if (sret)
					goto again;

				btrfs_set_path_blocking(p);
				sret = balance_level(trans, root, p, level);
				btrfs_clear_path_blocking(p, NULL);

				if (sret) {
					ret = sret;
					goto done;
				}
				b = p->nodes[level];
				if (!b) {
					btrfs_release_path(NULL, p);
					goto again;
				}
				slot = p->slots[level];
				BUG_ON(btrfs_header_nritems(b) == 1);
			}
			unlock_up(p, level, lowest_unlock);

			/* this is only true while dropping a snapshot */
			if (level == lowest_level) {
				ret = 0;
				goto done;
			}

			blocknr = btrfs_node_blockptr(b, slot);
			gen = btrfs_node_ptr_generation(b, slot);
			blocksize = btrfs_level_size(root, level - 1);

			tmp = btrfs_find_tree_block(root, blocknr, blocksize);
			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				b = tmp;
			} else {
				/*
				 * reduce lock contention at high levels
				 * of the btree by dropping locks before
				 * we read.
				 */
				if (level > 1) {
					btrfs_release_path(NULL, p);
					if (tmp)
						free_extent_buffer(tmp);
					if (should_reada)
						reada_for_search(root, p,
								 level, slot,
								 key->objectid);

					tmp = read_tree_block(root, blocknr,
							      blocksize, gen);
					if (tmp)
						free_extent_buffer(tmp);
					goto again;
				} else {
					btrfs_set_path_blocking(p);
					if (tmp)
						free_extent_buffer(tmp);
					if (should_reada)
						reada_for_search(root, p,
								 level, slot,
								 key->objectid);
					b = read_node_slot(root, b, slot);
				}
			}
			if (!p->skip_locking) {
				int lret;

				btrfs_clear_path_blocking(p, NULL);
				lret = btrfs_try_spin_lock(b);

				if (!lret) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
					btrfs_clear_path_blocking(p, b);
				}
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				int sret;

				btrfs_set_path_blocking(p);
				sret = split_leaf(trans, root, key,
						  p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(sret > 0);
				if (sret) {
					ret = sret;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	return ret;
}
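
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * caller typically interprets btrfs_search_slot's return values.  The
 * key type used here is a hypothetical choice for the example.
 */
#if 0
static int example_lookup(struct btrfs_root *root, struct btrfs_path *path,
			  u64 objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* NULL trans and cow == 0: a read-only search */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;	/* IO or other hard error */
	if (ret > 0) {
		/* not found: the path points at the insertion slot */
		btrfs_release_path(root, path);
		return -ENOENT;
	}
	/* found: the item is at path->nodes[0], path->slots[0] */
	return 0;
}
#endif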
int btrfs_merge_path(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_key *node_keys,
		     u64 *nodes, int lowest_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 bytenr;
	u64 generation;
	u32 blocksize;
	int level;
	int slot;
	int key_match;
	int ret;

	eb = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
	BUG_ON(ret);

	btrfs_set_lock_blocking(eb);

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		if (level == 0 || level <= lowest_level)
			break;

		ret = bin_search(parent, &node_keys[lowest_level], level,
				 &slot);
		if (ret && slot > 0)
			slot--;

		bytenr = btrfs_node_blockptr(parent, slot);
		if (nodes[level - 1] == bytenr)
			break;

		blocksize = btrfs_level_size(root, level - 1);
		generation = btrfs_node_ptr_generation(parent, slot);
		btrfs_node_key_to_cpu(eb, &key, slot);
		key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));

		if (generation == trans->transid) {
			eb = read_tree_block(root, bytenr, blocksize,
					     generation);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
		}

		/*
		 * if node keys match and node pointer hasn't been modified
		 * in the running transaction, we can merge the path. for
		 * blocks owned by reloc trees, the node pointer check is
		 * skipped, this is because these blocks are fully controlled
		 * by the space balance code, no one else can modify them.
		 */
		if (!nodes[level - 1] || !key_match ||
		    (generation == trans->transid &&
		     btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
			if (level == 1 || level == lowest_level + 1) {
				if (generation == trans->transid) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
				}
				break;
			}

			if (generation != trans->transid) {
				eb = read_tree_block(root, bytenr, blocksize,
						     generation);
				btrfs_tree_lock(eb);
				btrfs_set_lock_blocking(eb);
			}

			ret = btrfs_cow_block(trans, root, eb, parent, slot,
					      &eb);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				if (!nodes[level - 1]) {
					nodes[level - 1] = eb->start;
					memcpy(&node_keys[level - 1], &key,
					       sizeof(node_keys[0]));
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			parent = eb;
			continue;
		}

		btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
		btrfs_set_node_ptr_generation(parent, slot, trans->transid);
		btrfs_mark_buffer_dirty(parent);

		ret = btrfs_inc_extent_ref(trans, root,
					   nodes[level - 1],
					   blocksize, parent->start,
					   btrfs_header_owner(parent),
					   btrfs_header_generation(parent),
					   level - 1);
		BUG_ON(ret);

		/*
		 * If the block was created in the running transaction,
		 * it's possible this is the last reference to it, so we
		 * should drop the subtree.
		 */
		if (generation == trans->transid) {
			ret = btrfs_drop_subtree(trans, root, eb, parent);
			BUG_ON(ret);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
		} else {
			ret = btrfs_free_extent(trans, root, bytenr,
					blocksize, parent->start,
					btrfs_header_owner(parent),
					btrfs_header_generation(parent),
					level - 1, 1);
			BUG_ON(ret);
		}
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return 0;
}
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * update an item key without changing its position in the leaf.
 *
 * This function isn't completely safe.  It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
	BUG_ON(ret);

	return ret;
}
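
/*
 * Editor's note (not in the original file): a worked example of the
 * push_items math above.  With 4K nodes, BTRFS_NODEPTRS_PER_BLOCK(root)
 * is 121.  Say dst holds 100 pointers and src holds 30, with empty == 0.
 * Free room gives push_items = 121 - 100 = 21; the non-empty clamp
 * min(src_nritems - 8, push_items) = min(22, 21) = 21, so 21 pointers
 * move left and src is left with 9, above the minimum of 8.
 */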
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
	BUG_ON(ret);

	return ret;
}
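
/*
 * Editor's note (not in the original file): continuing the worked
 * example, balance_node_right is deliberately less aggressive than
 * push_node_left.  With src holding 30 pointers, max_push =
 * 30 / 2 + 1 = 16, so at most 16 pointers move right even if dst has
 * room for more, and a src with fewer than 4 pointers is never touched.
 */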
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;
	int ret;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, trans->transid,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	memset_extent_buffer(c, 0, 0, root->nodesize);
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	spin_lock(&root->node_lock);
	old = root->node;
	root->node = c;
	spin_unlock(&root->node_lock);

	ret = btrfs_update_extent_ref(trans, root, lower->start,
				      lower->len, lower->start, c->start,
				      root->root_key.objectid,
				      trans->transid, level - 1);
	BUG_ON(ret);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = 1;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	if (slot > nritems)
		BUG();
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else if (!trans->transaction->delayed_refs.flushing) {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);

	split = btrfs_alloc_free_block(trans, root, root->nodesize,
					path->nodes[level + 1]->start,
					root->root_key.objectid,
					trans->transid, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	btrfs_set_header_flags(split, btrfs_header_flags(c));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_owner(split, root->root_key.objectid);
	btrfs_set_header_flags(split, 0);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	mid = (c_nritems + 1) / 2;

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	btrfs_node_key(split, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
	BUG_ON(ret);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
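
/*
 * Editor's note (not in the original file): a worked example with
 * hypothetical numbers.  With 4K leaves, BTRFS_LEAF_DATA_SIZE(root) is
 * 3995.  If a leaf holds two items whose data totals 100 bytes, then
 * leaf_space_used = 100 + 2 * sizeof(struct btrfs_item) = 100 + 50 =
 * 150 (each item header is 25 bytes), leaving 3845 bytes free for new
 * items and their data.
 */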
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path,
			   int data_size, int empty,
			   struct extent_buffer *right,
			   int free_space, u32 left_nritems)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;
	int ret;

	if (empty)
		nr = 0;
	else
		nr = 1;

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);

	ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
	BUG_ON(ret);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
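/*
 * Editor's note: the scan in __push_leaf_right walks from the rightmost
 * item toward slot 0 and stops at the first item that would overflow the
 * right leaf.  The "space + push_space * 2 > free_space" test is a
 * heuristic cutoff: once the pending insertion slot is reached, pushing
 * is abandoned unless the right leaf still has comfortable headroom, so
 * the insert target is not packed into an already tight neighbor.
 */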
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path, int data_size,
			   int empty)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, data_size, empty,
				 right, free_space, left_nritems);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
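/*
 * Editor's note: the free-space test runs once before and once after
 * btrfs_cow_block because the COW may hand back a different extent
 * buffer for the right leaf; re-checking against the buffer that will
 * actually be modified keeps the decision honest.
 */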
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path, int data_size,
			  int empty, struct extent_buffer *left,
			  int free_space, int right_nritems)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int slot;
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	slot = path->slots[1];

	if (empty)
		nr = right_nritems;
	else
		nr = right_nritems - 1;

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);

	ret = btrfs_update_ref(trans, root, right, left,
			       old_left_nritems, push_items);
	BUG_ON(ret);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
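/*
 * Editor's note on the offset rebasing above (hypothetical numbers): a
 * right-leaf item keeps an offset measured against a data area of
 * BTRFS_LEAF_DATA_SIZE(root) bytes.  After the copy, its data sits
 * directly below the left leaf's existing data, whose lowest offset is
 * old_left_item_size, so every copied offset shrinks by exactly
 * (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size).
 */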
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int data_size,
			  int empty)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, data_size,
				empty, left, free_space, right_nritems);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct extent_buffer *l,
			       struct extent_buffer *right,
			       int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
	BUG_ON(ret);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}
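/*
 * Editor's note: rt_data_off is the distance each moved item's data
 * travels.  The tail items' data ended btrfs_item_end_nr(l, mid) bytes
 * into the old data area but is packed flush against the end of the new
 * right leaf, so every offset grows by BTRFS_LEAF_DATA_SIZE(root) -
 * btrfs_item_end_nr(l, mid), which is what the fixup loop adds.
 */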
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int double_split;
	int num_doubles = 0;

	/* first try to make some room by pushing left and right */
	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY &&
	    !trans->transaction->delayed_refs.flushing) {
		wret = push_leaf_right(trans, root, path, data_size, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size, 0);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	double_split = 0;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	right = btrfs_alloc_free_block(trans, root, root->leafsize,
					path->nodes[1]->start,
					root->root_key.objectid,
					trans->transid, 0, l->start, 0);
	if (IS_ERR(right)) {
		BUG_ON(1);
		return PTR_ERR(right);
	}

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				struct btrfs_disk_key disk_key;

				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key, right->start,
						  path->slots[1] + 1, 1);
				if (wret)
					ret = wret;

				btrfs_tree_unlock(path->nodes[0]);
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				path->slots[1] += 1;
				btrfs_mark_buffer_dirty(right);
				return ret;
			}
			mid = slot;
			if (mid != nritems &&
			    leaf_space_used(l, mid, nritems - mid) +
			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
				double_split = 1;
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				struct btrfs_disk_key disk_key;

				btrfs_cpu_key_to_disk(&disk_key, ins_key);
				btrfs_set_header_nritems(right, 0);
				wret = insert_ptr(trans, root, path,
						  &disk_key,
						  right->start,
						  path->slots[1], 1);
				if (wret)
					ret = wret;
				btrfs_tree_unlock(path->nodes[0]);
				free_extent_buffer(path->nodes[0]);
				path->nodes[0] = right;
				path->slots[0] = 0;
				if (path->slots[1] == 0) {
					wret = fixup_low_keys(trans, root,
							path, &disk_key, 1);
					if (wret)
						ret = wret;
				}
				btrfs_mark_buffer_dirty(right);
				return ret;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					double_split = 1;
				}
			}
		}
	}

	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
	BUG_ON(ret);

	if (double_split) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return ret;
}
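/*
 * Editor's note on the placement logic in split_leaf: when the insertion
 * slot is at or past mid, an append at the very end gets a brand new
 * empty right leaf; otherwise the split point slides to the slot itself.
 * The mirror case handles slot 0 by prepending an empty leaf (unless
 * extending an existing item).  If even the chosen half cannot hold
 * data_size more bytes, double_split sends the code back through
 * "again" for one more split, and num_doubles asserts it happens once.
 */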
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	u32 item_size;
	struct extent_buffer *leaf;
	struct btrfs_key orig_key;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int ret = 0;
	int slot;
	u32 nritems;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;
	char *buf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
	if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
		goto split;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_release_path(root, path);

	path->search_for_split = 1;
	path->keep_locks = 1;

	ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
	path->search_for_split = 0;

	/* if our item isn't there or got smaller, return now */
	if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
							path->slots[0])) {
		path->keep_locks = 0;
		return -EAGAIN;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &orig_key, path,
			 sizeof(struct btrfs_item), 1);
	path->keep_locks = 0;
	BUG_ON(ret);

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

split:
	/*
	 * make sure any changes to the path from split_leaf leave it
	 * in a blocking state
	 */
	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);
	slot = path->slots[0] + 1;
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);

	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	kfree(buf);
	return ret;
}
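/*
 * Editor's note: btrfs_split_item needs only sizeof(struct btrfs_item)
 * of slack because no payload is duplicated.  The item bytes are read
 * into "buf", the two halves are written back over the same data region,
 * and the only net growth in the leaf is the second item header.
 */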
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
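/*
 * Editor's note: with from_end == 0 the bytes are chopped off the front,
 * so the remaining data slides toward the end of the block and the key's
 * offset field is bumped by size_diff to keep naming the first surviving
 * byte.  Inline file extents get special care: the fixed header fields
 * ahead of the inline payload are moved first so they are not clobbered.
 */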
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int ret = 0;
	int slot;
	int slot_orig;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	slot_orig = path->slots[0];
	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		if (!leaf->map_token) {
			map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
		}
		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	if (leaf->map_token) {
		unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
		leaf->map_token = NULL;
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
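/*
 * Editor's note: extending grows an item's data toward the front of the
 * data area.  The room must already exist in the leaf (hence the BUG on
 * the free-space check above); the newly exposed bytes are left
 * uninitialized for the caller to fill in.
 */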
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		for (i = nr; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing what's on the next leaf and we'd
		 * have to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
static noinline_for_stack int
setup_items_for_insert(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      struct btrfs_key *cpu_key, u32 *data_size,
		      u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	int ret;
	struct extent_buffer *leaf;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		WARN_ON(leaf->map_token);
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}

			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);

	ret = 0;
	if (slot == 0) {
		struct btrfs_disk_key disk_key;
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
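/*
 * Worked example (editor's note, hypothetical numbers): inserting nr = 2
 * items with data sizes 10 and 20 at slot 3 of a leaf whose item 3
 * previously ended at offset 3000 shifts the later data down by
 * total_data = 30, slides the later headers up by two slots, and then
 * writes the new headers with offsets 2990 and 2970.
 */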
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	struct extent_buffer *leaf;
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	slot = path->slots[0];
	BUG_ON(slot < 0);

	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
				     total_data, total_size, nr);
out:
	return ret;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}
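/*
 * Editor's note: del_ptr only closes the gap in the key/blockptr array;
 * it frees nothing on disk.  Releasing the child block's extent is left
 * to callers such as btrfs_del_leaf below.
 */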
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].  bytenr is the node block pointer, but since the callers
 * already know it, it is faster to have them pass it down than to
 * read it out of the node again.
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 bytenr)
{
	int ret;
	u64 root_gen = btrfs_header_generation(path->nodes[1]);
	u64 parent_start = path->nodes[1]->start;
	u64 parent_owner = btrfs_header_owner(path->nodes[1]);

	ret = del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret)
		return ret;

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	ret = btrfs_free_extent(trans, root, bytenr,
				btrfs_level_size(root, 0),
				parent_start, parent_owner,
				root_gen, 0, 1);
	return ret;
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			if (!leaf->map_token) {
				map_extent_buffer(leaf, (unsigned long)item,
					sizeof(struct btrfs_item),
					&leaf->map_token, &leaf->kaddr,
					&leaf->map_start, &leaf->map_len,
					KM_USER1);
			}
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		if (leaf->map_token) {
			unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
			leaf->map_token = NULL;
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			ret = btrfs_del_leaf(trans, root, path, leaf->start);
			BUG_ON(ret);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 4 &&
		    !trans->transaction->delayed_refs.flushing) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1, 1);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path,
						     leaf->start);
				BUG_ON(ret);
				free_extent_buffer(leaf);
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
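/*
 * Editor's note on the merge threshold above: a leaf using under a
 * quarter of BTRFS_LEAF_DATA_SIZE(root) is pushed into its neighbors
 * with empty == 1, so streams of deletions collapse into fewer, fuller
 * leaves instead of leaving a trail of nearly empty blocks.
 */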
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(root, path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = 1;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(root, path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);

		btrfs_tree_lock(cur);

		path->locks[level - 1] = 1;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path, NULL);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
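/*
 * Editor's note: whenever a node is exhausted or skipped,
 * btrfs_find_next_key advances min_key and the whole descent restarts
 * from the root at "again"; the locks taken on the way down were the
 * only thing pinning the old position, so restarting is the simple safe
 * option.
 */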
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			int cache_only, u64 min_trans)
{
	int level = lowest_level;
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL)
				return 1;
			continue;
		}
		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level = 1;
	struct extent_buffer *c;
	struct extent_buffer *next = NULL;
	struct btrfs_key key;
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);

	btrfs_release_path(root, path);
	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	btrfs_set_path_blocking(path);
	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		path->slots[0]++;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL)
				return 1;
			continue;
		}

		if (next) {
			btrfs_tree_unlock(next);
			free_extent_buffer(next);
		}

		/* the path was set to blocking above */
		if (level == 1 && (path->locks[1] || path->skip_locking) &&
		    path->reada)
			reada_for_search(root, path, level, slot, 0);

		next = read_node_slot(root, c, slot);
		if (!path->skip_locking) {
			btrfs_assert_tree_locked(c);
			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = 1;
		if (!level)
			break;

		btrfs_set_path_blocking(path);
		if (level == 1 && path->locks[1] && path->reada)
			reada_for_search(root, path, level, slot, 0);
		next = read_node_slot(root, next, 0);
		if (!path->skip_locking) {
			btrfs_assert_tree_locked(path->nodes[level]);
			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
		}
	}
done:
	unlock_up(path, 0, 1);
	return 0;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type == type)
			return 0;
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}