// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Christoph Hellwig.
 */
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
/*
 * In-core extent record layout:
 *
 * +-------+----------------------------+
 * | 00:53 | all 54 bits of startoff    |
 * | 54:63 | low 10 bits of startblock  |
 * +-------+----------------------------+
 * | 00:20 | all 21 bits of length      |
 * | 21    | unwritten extent bit       |
 * | 22:63 | high 42 bits of startblock |
 * +-------+----------------------------+
 */
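/*
 * Worked example (illustrative only, not part of the original source): packing
 * an unwritten extent with br_startoff = 0x64, br_startblock = 0x12345 and
 * br_blockcount = 0x10 as done by xfs_iext_set() below gives
 *
 *	lo = (0x345 << 54) | 0x64		low 10 startblock bits + startoff
 *	hi = (0x12000 << 12) | (1 << 21) | 0x10	high startblock bits + unwritten + length
 *
 * i.e. hi == 0x12200010, and xfs_iext_get() reverses the transformation.
 */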
#define XFS_IEXT_STARTOFF_MASK		xfs_mask64lo(BMBT_STARTOFF_BITLEN)
#define XFS_IEXT_LENGTH_MASK		xfs_mask64lo(BMBT_BLOCKCOUNT_BITLEN)
#define XFS_IEXT_STARTBLOCK_MASK	xfs_mask64lo(BMBT_STARTBLOCK_BITLEN)

struct xfs_iext_rec {
	uint64_t			lo;
	uint64_t			hi;
};

/*
 * Given that the length can't be zero, only an empty hi value indicates an
 * unused record.
 */
static bool xfs_iext_rec_is_empty(struct xfs_iext_rec *rec)
{
	return rec->hi == 0;
}

static inline void xfs_iext_rec_clear(struct xfs_iext_rec *rec)
{
	rec->lo = 0;
	rec->hi = 0;
}

static void
xfs_iext_set(
	struct xfs_iext_rec	*rec,
	struct xfs_bmbt_irec	*irec)
{
	ASSERT((irec->br_startoff & ~XFS_IEXT_STARTOFF_MASK) == 0);
	ASSERT((irec->br_blockcount & ~XFS_IEXT_LENGTH_MASK) == 0);
	ASSERT((irec->br_startblock & ~XFS_IEXT_STARTBLOCK_MASK) == 0);

	rec->lo = irec->br_startoff & XFS_IEXT_STARTOFF_MASK;
	rec->hi = irec->br_blockcount & XFS_IEXT_LENGTH_MASK;

	rec->lo |= (irec->br_startblock << 54);
	rec->hi |= ((irec->br_startblock & ~xfs_mask64lo(10)) << (22 - 10));

	if (irec->br_state == XFS_EXT_UNWRITTEN)
		rec->hi |= (1 << 21);
}
static void
xfs_iext_get(
	struct xfs_bmbt_irec	*irec,
	struct xfs_iext_rec	*rec)
{
	irec->br_startoff = rec->lo & XFS_IEXT_STARTOFF_MASK;
	irec->br_blockcount = rec->hi & XFS_IEXT_LENGTH_MASK;

	irec->br_startblock = rec->lo >> 54;
	irec->br_startblock |= (rec->hi & xfs_mask64hi(42)) >> (22 - 10);

	if (rec->hi & (1 << 21))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}
enum {
	NODE_SIZE	= 256,
	KEYS_PER_NODE	= NODE_SIZE / (sizeof(uint64_t) + sizeof(void *)),
	RECS_PER_LEAF	= (NODE_SIZE - (2 * sizeof(struct xfs_iext_leaf *))) /
			  sizeof(struct xfs_iext_rec),
};
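/*
 * With the 256-byte NODE_SIZE assumed above this works out to 16 keys and
 * pointers per inner node and 15 records per leaf on a 64-bit build.
 */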
/*
 * In-core extent btree block layout:
 *
 * There are two types of blocks in the btree: leaf and inner (non-leaf) blocks.
 *
 * The leaf blocks are made up of %RECS_PER_LEAF extent records, which each
 * contain the startoff, blockcount, startblock and unwritten extent flag.
 * See above for the exact format.  The records are followed by pointers to the
 * previous and next leaf blocks (if there are any).
 *
 * The inner (non-leaf) blocks first contain KEYS_PER_NODE lookup keys, followed
 * by an equal number of pointers to the btree blocks at the next lower level.
 *
 *		+-------+-------+-------+-------+-------+----------+----------+
 * Leaf:	| rec 1 | rec 2 | rec 3 | rec 4 | rec N | prev-ptr | next-ptr |
 *		+-------+-------+-------+-------+-------+----------+----------+
 *
 *		+-------+-------+-------+-------+-------+-------+-------+-------+
 * Inner:	| key 1 | key 2 | key 3 | key N | ptr 1 | ptr 2 | ptr 3 | ptr N |
 *		+-------+-------+-------+-------+-------+-------+-------+-------+
 */
struct xfs_iext_node {
	uint64_t		keys[KEYS_PER_NODE];
#define XFS_IEXT_KEY_INVALID	(1ULL << 63)
	void			*ptrs[KEYS_PER_NODE];
};

struct xfs_iext_leaf {
	struct xfs_iext_rec	recs[RECS_PER_LEAF];
	struct xfs_iext_leaf	*prev;
	struct xfs_iext_leaf	*next;
};
inline xfs_extnum_t
xfs_iext_count(struct xfs_ifork *ifp)
{
	return ifp->if_bytes / sizeof(struct xfs_iext_rec);
}
static inline int xfs_iext_max_recs(struct xfs_ifork *ifp)
{
	if (ifp->if_height == 1)
		return xfs_iext_count(ifp);
	return RECS_PER_LEAF;
}
static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
{
	return &cur->leaf->recs[cur->pos];
}
static inline bool xfs_iext_valid(struct xfs_ifork *ifp,
		struct xfs_iext_cursor *cur)
{
	if (!cur->leaf)
		return false;
	if (cur->pos < 0 || cur->pos >= xfs_iext_max_recs(ifp))
		return false;
	if (xfs_iext_rec_is_empty(cur_rec(cur)))
		return false;
	return true;
}
static void *
xfs_iext_find_first_leaf(
	struct xfs_ifork	*ifp)
{
	struct xfs_iext_node	*node = ifp->if_u1.if_root;
	int			height;

	if (!ifp->if_height)
		return NULL;

	for (height = ifp->if_height; height > 1; height--) {
		node = node->ptrs[0];
		ASSERT(node);
	}

	return node;
}
static void *
xfs_iext_find_last_leaf(
	struct xfs_ifork	*ifp)
{
	struct xfs_iext_node	*node = ifp->if_u1.if_root;
	int			height, i;

	if (!ifp->if_height)
		return NULL;

	for (height = ifp->if_height; height > 1; height--) {
		for (i = 1; i < KEYS_PER_NODE; i++)
			if (!node->ptrs[i])
				break;
		node = node->ptrs[i - 1];
		ASSERT(node);
	}

	return node;
}
void
xfs_iext_first(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	cur->pos = 0;
	cur->leaf = xfs_iext_find_first_leaf(ifp);
}
void
xfs_iext_last(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	int			i;

	cur->leaf = xfs_iext_find_last_leaf(ifp);
	if (!cur->leaf) {
		cur->pos = 0;
		return;
	}

	for (i = 1; i < xfs_iext_max_recs(ifp); i++) {
		if (xfs_iext_rec_is_empty(&cur->leaf->recs[i]))
			break;
	}
	cur->pos = i - 1;
}
void
xfs_iext_next(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	if (!cur->leaf) {
		ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
		xfs_iext_first(ifp, cur);
		return;
	}

	ASSERT(cur->pos >= 0);
	ASSERT(cur->pos < xfs_iext_max_recs(ifp));

	cur->pos++;
	if (ifp->if_height > 1 && !xfs_iext_valid(ifp, cur) &&
	    cur->leaf->next) {
		cur->leaf = cur->leaf->next;
		cur->pos = 0;
	}
}
void
xfs_iext_prev(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	if (!cur->leaf) {
		ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
		xfs_iext_last(ifp, cur);
		return;
	}

	ASSERT(cur->pos >= 0);
	ASSERT(cur->pos <= RECS_PER_LEAF);

recurse:
	do {
		cur->pos--;
		if (xfs_iext_valid(ifp, cur))
			return;
	} while (cur->pos > 0);

	if (ifp->if_height > 1 && cur->leaf->prev) {
		cur->leaf = cur->leaf->prev;
		cur->pos = RECS_PER_LEAF;
		goto recurse;
	}
}
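/*
 * A typical walk over all extents in a fork using the helpers above (a sketch;
 * most callers use the for_each_xfs_iext() wrapper from xfs_inode_fork.h
 * rather than open-coding the loop):
 *
 *	struct xfs_iext_cursor	icur;
 *	struct xfs_bmbt_irec	got;
 *
 *	for (xfs_iext_first(ifp, &icur);
 *	     xfs_iext_get_extent(ifp, &icur, &got);
 *	     xfs_iext_next(ifp, &icur)) {
 *		// process "got"
 *	}
 */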
static inline int
xfs_iext_key_cmp(
	struct xfs_iext_node	*node,
	int			n,
	xfs_fileoff_t		offset)
{
	if (node->keys[n] > offset)
		return 1;
	if (node->keys[n] < offset)
		return -1;
	return 0;
}
static inline int
xfs_iext_rec_cmp(
	struct xfs_iext_rec	*rec,
	xfs_fileoff_t		offset)
{
	uint64_t		rec_offset = rec->lo & XFS_IEXT_STARTOFF_MASK;
	uint32_t		rec_len = rec->hi & XFS_IEXT_LENGTH_MASK;

	if (rec_offset > offset)
		return 1;
	if (rec_offset + rec_len <= offset)
		return -1;
	return 0;
}
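/*
 * Walk down from the root to the btree block at the given level that covers
 * the given file offset.
 */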
static void *
xfs_iext_find_level(
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		offset,
	int			level)
{
	struct xfs_iext_node	*node = ifp->if_u1.if_root;
	int			height, i;

	if (!ifp->if_height)
		return NULL;

	for (height = ifp->if_height; height > level; height--) {
		for (i = 1; i < KEYS_PER_NODE; i++)
			if (xfs_iext_key_cmp(node, i, offset) > 0)
				break;

		node = node->ptrs[i - 1];
		if (!node)
			break;
	}

	return node;
}
static int
xfs_iext_node_pos(
	struct xfs_iext_node	*node,
	xfs_fileoff_t		offset)
{
	int			i;

	for (i = 1; i < KEYS_PER_NODE; i++) {
		if (xfs_iext_key_cmp(node, i, offset) > 0)
			break;
	}

	return i - 1;
}
static int
xfs_iext_node_insert_pos(
	struct xfs_iext_node	*node,
	xfs_fileoff_t		offset)
{
	int			i;

	for (i = 0; i < KEYS_PER_NODE; i++) {
		if (xfs_iext_key_cmp(node, i, offset) > 0)
			return i;
	}

	return KEYS_PER_NODE;
}
static int
xfs_iext_node_nr_entries(
	struct xfs_iext_node	*node,
	int			start)
{
	int			i;

	for (i = start; i < KEYS_PER_NODE; i++) {
		if (node->keys[i] == XFS_IEXT_KEY_INVALID)
			break;
	}

	return i;
}
static int
xfs_iext_leaf_nr_entries(
	struct xfs_ifork	*ifp,
	struct xfs_iext_leaf	*leaf,
	int			start)
{
	int			i;

	for (i = start; i < xfs_iext_max_recs(ifp); i++) {
		if (xfs_iext_rec_is_empty(&leaf->recs[i]))
			break;
	}

	return i;
}
static inline uint64_t
xfs_iext_leaf_key(
	struct xfs_iext_leaf	*leaf,
	int			n)
{
	return leaf->recs[n].lo & XFS_IEXT_STARTOFF_MASK;
}
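/*
 * Add a new root node above the current root, growing the height of the
 * tree by one level.
 */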
static void
xfs_iext_grow(
	struct xfs_ifork	*ifp)
{
	struct xfs_iext_node	*node = kmem_zalloc(NODE_SIZE, KM_NOFS);
	int			i;

	if (ifp->if_height == 1) {
		struct xfs_iext_leaf *prev = ifp->if_u1.if_root;

		node->keys[0] = xfs_iext_leaf_key(prev, 0);
		node->ptrs[0] = prev;
	} else {
		struct xfs_iext_node *prev = ifp->if_u1.if_root;

		ASSERT(ifp->if_height > 1);

		node->keys[0] = prev->keys[0];
		node->ptrs[0] = prev;
	}

	for (i = 1; i < KEYS_PER_NODE; i++)
		node->keys[i] = XFS_IEXT_KEY_INVALID;

	ifp->if_u1.if_root = node;
	ifp->if_height++;
}
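/*
 * Replace the key old_offset with new_offset in every inner node on the path
 * from the root down to the block at the given level.
 */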
static void
xfs_iext_update_node(
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		old_offset,
	xfs_fileoff_t		new_offset,
	int			level,
	void			*ptr)
{
	struct xfs_iext_node	*node = ifp->if_u1.if_root;
	int			height, i;

	for (height = ifp->if_height; height > level; height--) {
		for (i = 0; i < KEYS_PER_NODE; i++) {
			if (i > 0 && xfs_iext_key_cmp(node, i, old_offset) > 0)
				break;
			if (node->keys[i] == old_offset)
				node->keys[i] = new_offset;
		}
		node = node->ptrs[i - 1];
		ASSERT(node);
	}

	ASSERT(node == ptr);
}
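/*
 * Split a full inner node: move the upper half of the entries into a newly
 * allocated node and adjust *nodep, *pos and *nr_entries so they describe the
 * node that now contains the insertion point.
 */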
static struct xfs_iext_node *
xfs_iext_split_node(
	struct xfs_iext_node	**nodep,
	int			*pos,
	int			*nr_entries)
{
	struct xfs_iext_node	*node = *nodep;
	struct xfs_iext_node	*new = kmem_zalloc(NODE_SIZE, KM_NOFS);
	const int		nr_move = KEYS_PER_NODE / 2;
	int			nr_keep = nr_move + (KEYS_PER_NODE & 1);
	int			i = 0;

	/* for sequential append operations just spill over into the new node */
	if (*pos == KEYS_PER_NODE) {
		*nodep = new;
		*pos = 0;
		*nr_entries = 0;
		goto done;
	}

	for (i = 0; i < nr_move; i++) {
		new->keys[i] = node->keys[nr_keep + i];
		new->ptrs[i] = node->ptrs[nr_keep + i];

		node->keys[nr_keep + i] = XFS_IEXT_KEY_INVALID;
		node->ptrs[nr_keep + i] = NULL;
	}

	if (*pos >= nr_keep) {
		*nodep = new;
		*pos -= nr_keep;
		*nr_entries = nr_move;
	} else {
		*nr_entries = nr_keep;
	}
done:
	for (; i < KEYS_PER_NODE; i++)
		new->keys[i] = XFS_IEXT_KEY_INVALID;
	return new;
}
static void
xfs_iext_insert_node(
	struct xfs_ifork	*ifp,
	uint64_t		offset,
	void			*ptr,
	int			level)
{
	struct xfs_iext_node	*node, *new;
	int			i, pos, nr_entries;

again:
	if (ifp->if_height < level)
		xfs_iext_grow(ifp);

	new = NULL;
	node = xfs_iext_find_level(ifp, offset, level);
	pos = xfs_iext_node_insert_pos(node, offset);
	nr_entries = xfs_iext_node_nr_entries(node, pos);

	ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);
	ASSERT(nr_entries <= KEYS_PER_NODE);

	if (nr_entries == KEYS_PER_NODE)
		new = xfs_iext_split_node(&node, &pos, &nr_entries);

	/*
	 * Update the pointers in higher levels if the first entry changes
	 * in an existing node.
	 */
	if (node != new && pos == 0 && nr_entries > 0)
		xfs_iext_update_node(ifp, node->keys[0], offset, level, node);

	for (i = nr_entries; i > pos; i--) {
		node->keys[i] = node->keys[i - 1];
		node->ptrs[i] = node->ptrs[i - 1];
	}
	node->keys[pos] = offset;
	node->ptrs[pos] = ptr;

	if (new) {
		offset = new->keys[0];
		ptr = new;
		level++;
		goto again;
	}
}
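/*
 * Split a full leaf: move the upper half of the records into a new leaf, link
 * it into the prev/next chain, and point the cursor at the leaf that now
 * holds the insertion position.
 */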
static struct xfs_iext_leaf *
xfs_iext_split_leaf(
	struct xfs_iext_cursor	*cur,
	int			*nr_entries)
{
	struct xfs_iext_leaf	*leaf = cur->leaf;
	struct xfs_iext_leaf	*new = kmem_zalloc(NODE_SIZE, KM_NOFS);
	const int		nr_move = RECS_PER_LEAF / 2;
	int			nr_keep = nr_move + (RECS_PER_LEAF & 1);
	int			i;

	/* for sequential append operations just spill over into the new node */
	if (cur->pos == RECS_PER_LEAF) {
		cur->leaf = new;
		cur->pos = 0;
		*nr_entries = 0;
		goto done;
	}

	for (i = 0; i < nr_move; i++) {
		new->recs[i] = leaf->recs[nr_keep + i];
		xfs_iext_rec_clear(&leaf->recs[nr_keep + i]);
	}

	if (cur->pos >= nr_keep) {
		cur->leaf = new;
		cur->pos -= nr_keep;
		*nr_entries = nr_move;
	} else {
		*nr_entries = nr_keep;
	}
done:
	if (leaf->next)
		leaf->next->prev = new;
	new->next = leaf->next;
	new->prev = leaf;
	leaf->next = new;
	return new;
}
static void
xfs_iext_alloc_root(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	ASSERT(ifp->if_bytes == 0);

	ifp->if_u1.if_root = kmem_zalloc(sizeof(struct xfs_iext_rec), KM_NOFS);
	ifp->if_height = 1;

	/* now that we have a node step into it */
	cur->leaf = ifp->if_u1.if_root;
	cur->pos = 0;
}
static void
xfs_iext_realloc_root(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur)
{
	size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
	void *new;

	/* account for the prev/next pointers */
	if (new_size / sizeof(struct xfs_iext_rec) == RECS_PER_LEAF)
		new_size = NODE_SIZE;

	new = kmem_realloc(ifp->if_u1.if_root, new_size, KM_NOFS);
	memset(new + ifp->if_bytes, 0, new_size - ifp->if_bytes);
	ifp->if_u1.if_root = new;
	cur->leaf = new;
}
/*
 * Increment the sequence counter on extent tree changes. If we are on a COW
 * fork, this allows the writeback code to skip looking for a COW extent if the
 * COW fork hasn't changed. We use WRITE_ONCE here to ensure the update to the
 * sequence counter is seen before the modifications to the extent tree itself
 * take effect.
 */
static inline void xfs_iext_inc_seq(struct xfs_ifork *ifp, int state)
{
	WRITE_ONCE(ifp->if_seq, READ_ONCE(ifp->if_seq) + 1);
}
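/*
 * Insert a new extent at the cursor position, allocating or growing the root
 * as needed, splitting a full leaf, and pushing new keys up into the inner
 * nodes.
 */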
void
xfs_iext_insert(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*irec,
	int			state)
{
	struct xfs_ifork	*ifp = xfs_iext_state_to_fork(ip, state);
	xfs_fileoff_t		offset = irec->br_startoff;
	struct xfs_iext_leaf	*new = NULL;
	int			nr_entries, i;

	xfs_iext_inc_seq(ifp, state);

	if (ifp->if_height == 0)
		xfs_iext_alloc_root(ifp, cur);
	else if (ifp->if_height == 1)
		xfs_iext_realloc_root(ifp, cur);

	nr_entries = xfs_iext_leaf_nr_entries(ifp, cur->leaf, cur->pos);
	ASSERT(nr_entries <= RECS_PER_LEAF);
	ASSERT(cur->pos >= nr_entries ||
	       xfs_iext_rec_cmp(cur_rec(cur), irec->br_startoff) != 0);

	if (nr_entries == RECS_PER_LEAF)
		new = xfs_iext_split_leaf(cur, &nr_entries);

	/*
	 * Update the pointers in higher levels if the first entry changes
	 * in an existing node.
	 */
	if (cur->leaf != new && cur->pos == 0 && nr_entries > 0) {
		xfs_iext_update_node(ifp, xfs_iext_leaf_key(cur->leaf, 0),
				offset, 1, cur->leaf);
	}

	for (i = nr_entries; i > cur->pos; i--)
		cur->leaf->recs[i] = cur->leaf->recs[i - 1];
	xfs_iext_set(cur_rec(cur), irec);
	ifp->if_bytes += sizeof(struct xfs_iext_rec);

	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);

	if (new)
		xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
}
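/*
 * Try to merge an underfilled inner node into one of its siblings.  Returns
 * the node that became redundant and must be removed from the parent, or
 * NULL if no merge was possible.
 */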
static struct xfs_iext_node *
xfs_iext_rebalance_node(
	struct xfs_iext_node	*parent,
	int			*pos,
	struct xfs_iext_node	*node,
	int			nr_entries)
{
	/*
	 * If the neighbouring nodes are completely full, or have different
	 * parents, we might never be able to merge our node, and will only
	 * delete it once the number of entries hits zero.
	 */
	if (nr_entries == 0)
		return node;

	if (*pos > 0) {
		struct xfs_iext_node *prev = parent->ptrs[*pos - 1];
		int nr_prev = xfs_iext_node_nr_entries(prev, 0), i;

		if (nr_prev + nr_entries <= KEYS_PER_NODE) {
			for (i = 0; i < nr_entries; i++) {
				prev->keys[nr_prev + i] = node->keys[i];
				prev->ptrs[nr_prev + i] = node->ptrs[i];
			}
			return node;
		}
	}

	if (*pos + 1 < xfs_iext_node_nr_entries(parent, *pos)) {
		struct xfs_iext_node *next = parent->ptrs[*pos + 1];
		int nr_next = xfs_iext_node_nr_entries(next, 0), i;

		if (nr_entries + nr_next <= KEYS_PER_NODE) {
			/*
			 * Merge the next node into this node so that we don't
			 * have to do an additional update of the keys in the
			 * higher level.
			 */
			for (i = 0; i < nr_next; i++) {
				node->keys[nr_entries + i] = next->keys[i];
				node->ptrs[nr_entries + i] = next->ptrs[i];
			}

			++*pos;
			return next;
		}
	}

	return NULL;
}
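/*
 * Remove the pointer to a child block from its parent at level 2 and walk up
 * the tree, merging or freeing inner nodes that become underfilled.
 */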
static void
xfs_iext_remove_node(
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		offset,
	void			*victim)
{
	struct xfs_iext_node	*node, *parent;
	int			level = 2, pos, nr_entries, i;

	ASSERT(level <= ifp->if_height);
	node = xfs_iext_find_level(ifp, offset, level);
	pos = xfs_iext_node_pos(node, offset);
again:
	ASSERT(node->ptrs[pos]);
	ASSERT(node->ptrs[pos] == victim);
	kmem_free(victim);

	nr_entries = xfs_iext_node_nr_entries(node, pos) - 1;
	offset = node->keys[0];
	for (i = pos; i < nr_entries; i++) {
		node->keys[i] = node->keys[i + 1];
		node->ptrs[i] = node->ptrs[i + 1];
	}
	node->keys[nr_entries] = XFS_IEXT_KEY_INVALID;
	node->ptrs[nr_entries] = NULL;

	if (pos == 0 && nr_entries > 0) {
		xfs_iext_update_node(ifp, offset, node->keys[0], level, node);
		offset = node->keys[0];
	}

	if (nr_entries >= KEYS_PER_NODE / 2)
		return;

	if (level < ifp->if_height) {
		/*
		 * If we aren't at the root yet try to find a neighbour node to
		 * merge with (or delete the node if it is empty), and then
		 * recurse up to the next level.
		 */
		level++;
		parent = xfs_iext_find_level(ifp, offset, level);
		pos = xfs_iext_node_pos(parent, offset);

		ASSERT(pos != KEYS_PER_NODE);
		ASSERT(parent->ptrs[pos] == node);

		node = xfs_iext_rebalance_node(parent, &pos, node, nr_entries);
		if (node) {
			victim = node;
			node = parent;
			goto again;
		}
	} else if (nr_entries == 1) {
		/*
		 * If we are at the root and only one entry is left we can just
		 * free this node and update the root pointer.
		 */
		ASSERT(node == ifp->if_u1.if_root);
		ifp->if_u1.if_root = node->ptrs[0];
		ifp->if_height--;
		kmem_free(node);
	}
}
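/*
 * Try to merge an underfilled leaf into its previous or next sibling; the
 * leaf that becomes redundant is unlinked from the prev/next chain and its
 * key is removed from the inner nodes.
 */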
static void
xfs_iext_rebalance_leaf(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur,
	struct xfs_iext_leaf	*leaf,
	xfs_fileoff_t		offset,
	int			nr_entries)
{
	/*
	 * If the neighbouring nodes are completely full we might never be able
	 * to merge our node, and will only delete it once the number of
	 * entries hits zero.
	 */
	if (nr_entries == 0)
		goto remove_node;

	if (leaf->prev) {
		int nr_prev = xfs_iext_leaf_nr_entries(ifp, leaf->prev, 0), i;

		if (nr_prev + nr_entries <= RECS_PER_LEAF) {
			for (i = 0; i < nr_entries; i++)
				leaf->prev->recs[nr_prev + i] = leaf->recs[i];

			if (cur->leaf == leaf) {
				cur->leaf = leaf->prev;
				cur->pos += nr_prev;
			}
			goto remove_node;
		}
	}

	if (leaf->next) {
		int nr_next = xfs_iext_leaf_nr_entries(ifp, leaf->next, 0), i;

		if (nr_entries + nr_next <= RECS_PER_LEAF) {
			/*
			 * Merge the next node into this node so that we don't
			 * have to do an additional update of the keys in the
			 * higher level.
			 */
			for (i = 0; i < nr_next; i++) {
				leaf->recs[nr_entries + i] =
					leaf->next->recs[i];
			}

			if (cur->leaf == leaf->next) {
				cur->leaf = leaf;
				cur->pos += nr_entries;
			}

			offset = xfs_iext_leaf_key(leaf->next, 0);
			leaf = leaf->next;
			goto remove_node;
		}
	}

	return;
remove_node:
	if (leaf->prev)
		leaf->prev->next = leaf->next;
	if (leaf->next)
		leaf->next->prev = leaf->prev;
	xfs_iext_remove_node(ifp, offset, leaf);
}
static void
xfs_iext_free_last_leaf(
	struct xfs_ifork	*ifp)
{
	ifp->if_height--;
	kmem_free(ifp->if_u1.if_root);
	ifp->if_u1.if_root = NULL;
}
void
xfs_iext_remove(
	struct xfs_inode	*ip,
	struct xfs_iext_cursor	*cur,
	int			state)
{
	struct xfs_ifork	*ifp = xfs_iext_state_to_fork(ip, state);
	struct xfs_iext_leaf	*leaf = cur->leaf;
	xfs_fileoff_t		offset = xfs_iext_leaf_key(leaf, 0);
	int			i, nr_entries;

	trace_xfs_iext_remove(ip, cur, state, _RET_IP_);

	ASSERT(ifp->if_height > 0);
	ASSERT(ifp->if_u1.if_root != NULL);
	ASSERT(xfs_iext_valid(ifp, cur));

	xfs_iext_inc_seq(ifp, state);

	nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
	for (i = cur->pos; i < nr_entries; i++)
		leaf->recs[i] = leaf->recs[i + 1];
	xfs_iext_rec_clear(&leaf->recs[nr_entries]);
	ifp->if_bytes -= sizeof(struct xfs_iext_rec);

	if (cur->pos == 0 && nr_entries > 0) {
		xfs_iext_update_node(ifp, offset, xfs_iext_leaf_key(leaf, 0), 1,
				leaf);
		offset = xfs_iext_leaf_key(leaf, 0);
	} else if (cur->pos == nr_entries) {
		if (ifp->if_height > 1 && leaf->next)
			cur->leaf = leaf->next;
		else
			cur->leaf = NULL;
		cur->pos = 0;
	}

	if (nr_entries >= RECS_PER_LEAF / 2)
		return;

	if (ifp->if_height > 1)
		xfs_iext_rebalance_leaf(ifp, cur, leaf, offset, nr_entries);
	else if (nr_entries == 0)
		xfs_iext_free_last_leaf(ifp);
}
/*
 * Lookup the extent covering bno.
 *
 * If there is an extent covering bno return true, store the expanded extent
 * structure in *gotp, and the extent cursor in *cur.
 * If there is no extent covering bno, but there is an extent after it (e.g.
 * it lies in a hole) return that extent in *gotp and its cursor in *cur
 * instead.
 * If bno is beyond the last extent return false, and return an invalid
 * cursor value.
 */
bool
xfs_iext_lookup_extent(
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		offset,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*gotp)
{
	XFS_STATS_INC(ip->i_mount, xs_look_exlist);

	cur->leaf = xfs_iext_find_level(ifp, offset, 1);
	if (!cur->leaf) {
		cur->pos = 0;
		return false;
	}

	for (cur->pos = 0; cur->pos < xfs_iext_max_recs(ifp); cur->pos++) {
		struct xfs_iext_rec *rec = cur_rec(cur);

		if (xfs_iext_rec_is_empty(rec))
			break;
		if (xfs_iext_rec_cmp(rec, offset) >= 0)
			goto found;
	}

	/* Try looking in the next node for an entry > offset */
	if (ifp->if_height == 1 || !cur->leaf->next)
		return false;
	cur->leaf = cur->leaf->next;
	cur->pos = 0;
	if (!xfs_iext_valid(ifp, cur))
		return false;
found:
	xfs_iext_get(gotp, cur_rec(cur));
	return true;
}
/*
 * Return the last extent before end, and if this extent doesn't cover
 * end, update end to the end of the extent.
 */
bool
xfs_iext_lookup_extent_before(
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		*end,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*gotp)
{
	/* could be optimized to not even look up the next on a match.. */
	if (xfs_iext_lookup_extent(ip, ifp, *end - 1, cur, gotp) &&
	    gotp->br_startoff <= *end - 1)
		return true;
	if (!xfs_iext_prev_extent(ifp, cur, gotp))
		return false;
	*end = gotp->br_startoff + gotp->br_blockcount;
	return true;
}
void
xfs_iext_update_extent(
	struct xfs_inode	*ip,
	int			state,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*new)
{
	struct xfs_ifork	*ifp = xfs_iext_state_to_fork(ip, state);

	xfs_iext_inc_seq(ifp, state);

	if (cur->pos == 0) {
		struct xfs_bmbt_irec	old;

		xfs_iext_get(&old, cur_rec(cur));
		if (new->br_startoff != old.br_startoff) {
			xfs_iext_update_node(ifp, old.br_startoff,
					new->br_startoff, 1, cur->leaf);
		}
	}

	trace_xfs_bmap_pre_update(ip, cur, state, _RET_IP_);
	xfs_iext_set(cur_rec(cur), new);
	trace_xfs_bmap_post_update(ip, cur, state, _RET_IP_);
}
/*
 * Return true if the cursor points at an extent and copy that extent to
 * *gotp; else return false.
 */
bool
xfs_iext_get_extent(
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*gotp)
{
	if (!xfs_iext_valid(ifp, cur))
		return false;
	xfs_iext_get(gotp, cur_rec(cur));
	return true;
}
/*
 * This is a recursive function, so we need to be extremely careful with
 * stack usage.
 */
static void
xfs_iext_destroy_node(
	struct xfs_iext_node	*node,
	int			level)
{
	int			i;

	if (level > 1) {
		for (i = 0; i < KEYS_PER_NODE; i++) {
			if (node->keys[i] == XFS_IEXT_KEY_INVALID)
				break;
			xfs_iext_destroy_node(node->ptrs[i], level - 1);
		}
	}

	kmem_free(node);
}
1047 struct xfs_ifork
*ifp
)
1049 xfs_iext_destroy_node(ifp
->if_u1
.if_root
, ifp
->if_height
);
1053 ifp
->if_u1
.if_root
= NULL
;