/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
					   xfs_da_state_blk_t *node_blk_1,
					   xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
					   xfs_da_state_blk_t *old_node_blk,
					   xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
					    xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
					   xfs_da_state_blk_t *src_node_blk,
					   xfs_da_state_blk_t *dst_node_blk);

STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				   xfs_da_state_blk_t *drop_blk,
				   xfs_da_state_blk_t *save_blk);

kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
	memset((char *)state, 0, sizeof(*state));
	kmem_zone_free(xfs_da_state_zone, state);
}
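
/*
 * Typical use of the state structure (illustrative sketch only; the real
 * callers live in the attr and dir2 code, not in this file):
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	error = xfs_da3_node_lookup_int(state, &retval);
 *	...	(walk state->path / state->altpath)
 *	xfs_da_state_free(state);
 */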

STATIC bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_buf_ioerror(bp, EFSBADCRC);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp)) {
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			break;
		}
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		break;
	}

	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};
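
/*
 * This ops vector is what attaches the verifiers above to da btree buffers:
 * xfs_da3_node_read() below passes it to xfs_da_read_buf(), and
 * xfs_da3_node_create() installs it on freshly initialised blocks, so
 * ->verify_read runs when a buffer completes I/O and ->verify_write runs
 * before a dirty buffer is written back.
 */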

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}
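
/*
 * Note: both xfs_da3_root_split() and xfs_da3_node_split() below call
 * xfs_da3_node_create() to initialise the blocks they obtain from
 * xfs_da_grow_inode(), so every new intermediate node starts out with a
 * correctly stamped header and the node verifier already attached.
 */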

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return(error);
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		return(error);	/* GROT: dir is inconsistent */

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int				/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr nodehdr;

		dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
		level = nodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly here.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
634 * Split the node, rebalance, then add the new entry.
636 STATIC
int /* error */
638 struct xfs_da_state
*state
,
639 struct xfs_da_state_blk
*oldblk
,
640 struct xfs_da_state_blk
*newblk
,
641 struct xfs_da_state_blk
*addblk
,
645 struct xfs_da_intnode
*node
;
646 struct xfs_da3_icnode_hdr nodehdr
;
651 struct xfs_inode
*dp
= state
->args
->dp
;
653 trace_xfs_da_node_split(state
->args
);
655 node
= oldblk
->bp
->b_addr
;
656 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
659 * With V2 dirs the extra block is data or freespace.
661 useextra
= state
->extravalid
&& state
->args
->whichfork
== XFS_ATTR_FORK
;
662 newcount
= 1 + useextra
;
664 * Do we have to split the node?
666 if (nodehdr
.count
+ newcount
> state
->args
->geo
->node_ents
) {
668 * Allocate a new node, add to the doubly linked chain of
669 * nodes, then move some of our excess entries into it.
671 error
= xfs_da_grow_inode(state
->args
, &blkno
);
673 return(error
); /* GROT: dir is inconsistent */
675 error
= xfs_da3_node_create(state
->args
, blkno
, treelevel
,
676 &newblk
->bp
, state
->args
->whichfork
);
678 return(error
); /* GROT: dir is inconsistent */
679 newblk
->blkno
= blkno
;
680 newblk
->magic
= XFS_DA_NODE_MAGIC
;
681 xfs_da3_node_rebalance(state
, oldblk
, newblk
);
682 error
= xfs_da3_blk_link(state
, oldblk
, newblk
);
691 * Insert the new entry(s) into the correct block
692 * (updating last hashval in the process).
694 * xfs_da3_node_add() inserts BEFORE the given index,
695 * and as a result of using node_lookup_int() we always
696 * point to a valid entry (not after one), but a split
697 * operation always results in a new block whose hashvals
698 * FOLLOW the current block.
700 * If we had double-split op below us, then add the extra block too.
702 node
= oldblk
->bp
->b_addr
;
703 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
704 if (oldblk
->index
<= nodehdr
.count
) {
706 xfs_da3_node_add(state
, oldblk
, addblk
);
708 if (state
->extraafter
)
710 xfs_da3_node_add(state
, oldblk
, &state
->extrablk
);
711 state
->extravalid
= 0;
715 xfs_da3_node_add(state
, newblk
, addblk
);
717 if (state
->extraafter
)
719 xfs_da3_node_add(state
, newblk
, &state
->extrablk
);
720 state
->extravalid
= 0;
728 * Balance the btree elements between two intermediate nodes,
729 * usually one full and one empty.
731 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
734 xfs_da3_node_rebalance(
735 struct xfs_da_state
*state
,
736 struct xfs_da_state_blk
*blk1
,
737 struct xfs_da_state_blk
*blk2
)
739 struct xfs_da_intnode
*node1
;
740 struct xfs_da_intnode
*node2
;
741 struct xfs_da_intnode
*tmpnode
;
742 struct xfs_da_node_entry
*btree1
;
743 struct xfs_da_node_entry
*btree2
;
744 struct xfs_da_node_entry
*btree_s
;
745 struct xfs_da_node_entry
*btree_d
;
746 struct xfs_da3_icnode_hdr nodehdr1
;
747 struct xfs_da3_icnode_hdr nodehdr2
;
748 struct xfs_trans
*tp
;
752 struct xfs_inode
*dp
= state
->args
->dp
;
754 trace_xfs_da_node_rebalance(state
->args
);
756 node1
= blk1
->bp
->b_addr
;
757 node2
= blk2
->bp
->b_addr
;
758 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
759 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
760 btree1
= dp
->d_ops
->node_tree_p(node1
);
761 btree2
= dp
->d_ops
->node_tree_p(node2
);
764 * Figure out how many entries need to move, and in which direction.
765 * Swap the nodes around if that makes it simpler.
767 if (nodehdr1
.count
> 0 && nodehdr2
.count
> 0 &&
768 ((be32_to_cpu(btree2
[0].hashval
) < be32_to_cpu(btree1
[0].hashval
)) ||
769 (be32_to_cpu(btree2
[nodehdr2
.count
- 1].hashval
) <
770 be32_to_cpu(btree1
[nodehdr1
.count
- 1].hashval
)))) {
774 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
775 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
776 btree1
= dp
->d_ops
->node_tree_p(node1
);
777 btree2
= dp
->d_ops
->node_tree_p(node2
);
781 count
= (nodehdr1
.count
- nodehdr2
.count
) / 2;
784 tp
= state
->args
->trans
;
786 * Two cases: high-to-low and low-to-high.
790 * Move elements in node2 up to make a hole.
792 tmp
= nodehdr2
.count
;
794 tmp
*= (uint
)sizeof(xfs_da_node_entry_t
);
795 btree_s
= &btree2
[0];
796 btree_d
= &btree2
[count
];
797 memmove(btree_d
, btree_s
, tmp
);
801 * Move the req'd B-tree elements from high in node1 to
804 nodehdr2
.count
+= count
;
805 tmp
= count
* (uint
)sizeof(xfs_da_node_entry_t
);
806 btree_s
= &btree1
[nodehdr1
.count
- count
];
807 btree_d
= &btree2
[0];
808 memcpy(btree_d
, btree_s
, tmp
);
809 nodehdr1
.count
-= count
;
812 * Move the req'd B-tree elements from low in node2 to
816 tmp
= count
* (uint
)sizeof(xfs_da_node_entry_t
);
817 btree_s
= &btree2
[0];
818 btree_d
= &btree1
[nodehdr1
.count
];
819 memcpy(btree_d
, btree_s
, tmp
);
820 nodehdr1
.count
+= count
;
822 xfs_trans_log_buf(tp
, blk1
->bp
,
823 XFS_DA_LOGRANGE(node1
, btree_d
, tmp
));
826 * Move elements in node2 down to fill the hole.
828 tmp
= nodehdr2
.count
- count
;
829 tmp
*= (uint
)sizeof(xfs_da_node_entry_t
);
830 btree_s
= &btree2
[count
];
831 btree_d
= &btree2
[0];
832 memmove(btree_d
, btree_s
, tmp
);
833 nodehdr2
.count
-= count
;
837 * Log header of node 1 and all current bits of node 2.
839 dp
->d_ops
->node_hdr_to_disk(node1
, &nodehdr1
);
840 xfs_trans_log_buf(tp
, blk1
->bp
,
841 XFS_DA_LOGRANGE(node1
, &node1
->hdr
, dp
->d_ops
->node_hdr_size
));
843 dp
->d_ops
->node_hdr_to_disk(node2
, &nodehdr2
);
844 xfs_trans_log_buf(tp
, blk2
->bp
,
845 XFS_DA_LOGRANGE(node2
, &node2
->hdr
,
846 dp
->d_ops
->node_hdr_size
+
847 (sizeof(btree2
[0]) * nodehdr2
.count
)));
850 * Record the last hashval from each block for upward propagation.
851 * (note: don't use the swapped node pointers)
854 node1
= blk1
->bp
->b_addr
;
855 node2
= blk2
->bp
->b_addr
;
856 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
857 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
858 btree1
= dp
->d_ops
->node_tree_p(node1
);
859 btree2
= dp
->d_ops
->node_tree_p(node2
);
861 blk1
->hashval
= be32_to_cpu(btree1
[nodehdr1
.count
- 1].hashval
);
862 blk2
->hashval
= be32_to_cpu(btree2
[nodehdr2
.count
- 1].hashval
);
865 * Adjust the expected index for insertion.
867 if (blk1
->index
>= nodehdr1
.count
) {
868 blk2
->index
= blk1
->index
- nodehdr1
.count
;
869 blk1
->index
= nodehdr1
.count
+ 1; /* make it invalid */
874 * Add a new entry to an intermediate node.
878 struct xfs_da_state
*state
,
879 struct xfs_da_state_blk
*oldblk
,
880 struct xfs_da_state_blk
*newblk
)
882 struct xfs_da_intnode
*node
;
883 struct xfs_da3_icnode_hdr nodehdr
;
884 struct xfs_da_node_entry
*btree
;
886 struct xfs_inode
*dp
= state
->args
->dp
;
888 trace_xfs_da_node_add(state
->args
);
890 node
= oldblk
->bp
->b_addr
;
891 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
892 btree
= dp
->d_ops
->node_tree_p(node
);
894 ASSERT(oldblk
->index
>= 0 && oldblk
->index
<= nodehdr
.count
);
895 ASSERT(newblk
->blkno
!= 0);
896 if (state
->args
->whichfork
== XFS_DATA_FORK
)
897 ASSERT(newblk
->blkno
>= state
->args
->geo
->leafblk
&&
898 newblk
->blkno
< state
->args
->geo
->freeblk
);
901 * We may need to make some room before we insert the new node.
904 if (oldblk
->index
< nodehdr
.count
) {
905 tmp
= (nodehdr
.count
- oldblk
->index
) * (uint
)sizeof(*btree
);
906 memmove(&btree
[oldblk
->index
+ 1], &btree
[oldblk
->index
], tmp
);
908 btree
[oldblk
->index
].hashval
= cpu_to_be32(newblk
->hashval
);
909 btree
[oldblk
->index
].before
= cpu_to_be32(newblk
->blkno
);
910 xfs_trans_log_buf(state
->args
->trans
, oldblk
->bp
,
911 XFS_DA_LOGRANGE(node
, &btree
[oldblk
->index
],
912 tmp
+ sizeof(*btree
)));
915 dp
->d_ops
->node_hdr_to_disk(node
, &nodehdr
);
916 xfs_trans_log_buf(state
->args
->trans
, oldblk
->bp
,
917 XFS_DA_LOGRANGE(node
, &node
->hdr
, dp
->d_ops
->node_hdr_size
));
920 * Copy the last hash value from the oldblk to propagate upwards.
922 oldblk
->hashval
= be32_to_cpu(btree
[nodehdr
.count
- 1].hashval
);
925 /*========================================================================
926 * Routines used for shrinking the Btree.
927 *========================================================================*/
930 * Deallocate an empty leaf node, remove it from its parent,
931 * possibly deallocating that block, etc...
935 struct xfs_da_state
*state
)
937 struct xfs_da_state_blk
*drop_blk
;
938 struct xfs_da_state_blk
*save_blk
;
942 trace_xfs_da_join(state
->args
);
944 drop_blk
= &state
->path
.blk
[ state
->path
.active
-1 ];
945 save_blk
= &state
->altpath
.blk
[ state
->path
.active
-1 ];
946 ASSERT(state
->path
.blk
[0].magic
== XFS_DA_NODE_MAGIC
);
947 ASSERT(drop_blk
->magic
== XFS_ATTR_LEAF_MAGIC
||
948 drop_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
);
951 * Walk back up the tree joining/deallocating as necessary.
952 * When we stop dropping blocks, break out.
954 for ( ; state
->path
.active
>= 2; drop_blk
--, save_blk
--,
955 state
->path
.active
--) {
957 * See if we can combine the block with a neighbor.
958 * (action == 0) => no options, just leave
959 * (action == 1) => coalesce, then unlink
960 * (action == 2) => block empty, unlink it
962 switch (drop_blk
->magic
) {
963 case XFS_ATTR_LEAF_MAGIC
:
964 error
= xfs_attr3_leaf_toosmall(state
, &action
);
969 xfs_attr3_leaf_unbalance(state
, drop_blk
, save_blk
);
971 case XFS_DIR2_LEAFN_MAGIC
:
972 error
= xfs_dir2_leafn_toosmall(state
, &action
);
977 xfs_dir2_leafn_unbalance(state
, drop_blk
, save_blk
);
979 case XFS_DA_NODE_MAGIC
:
981 * Remove the offending node, fixup hashvals,
982 * check for a toosmall neighbor.
984 xfs_da3_node_remove(state
, drop_blk
);
985 xfs_da3_fixhashpath(state
, &state
->path
);
986 error
= xfs_da3_node_toosmall(state
, &action
);
991 xfs_da3_node_unbalance(state
, drop_blk
, save_blk
);
994 xfs_da3_fixhashpath(state
, &state
->altpath
);
995 error
= xfs_da3_blk_unlink(state
, drop_blk
, save_blk
);
996 xfs_da_state_kill_altpath(state
);
999 error
= xfs_da_shrink_inode(state
->args
, drop_blk
->blkno
,
1001 drop_blk
->bp
= NULL
;
1006 * We joined all the way to the top. If it turns out that
1007 * we only have one entry in the root, make the child block
1010 xfs_da3_node_remove(state
, drop_blk
);
1011 xfs_da3_fixhashpath(state
, &state
->path
);
1012 error
= xfs_da3_root_join(state
, &state
->path
.blk
[0]);

#ifdef DEBUG
STATIC void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
1039 * We have only one entry in the root. Copy the only remaining child of
1040 * the old root to block 0 as the new root node.
1044 struct xfs_da_state
*state
,
1045 struct xfs_da_state_blk
*root_blk
)
1047 struct xfs_da_intnode
*oldroot
;
1048 struct xfs_da_args
*args
;
1051 struct xfs_da3_icnode_hdr oldroothdr
;
1052 struct xfs_da_node_entry
*btree
;
1054 struct xfs_inode
*dp
= state
->args
->dp
;
1056 trace_xfs_da_root_join(state
->args
);
1058 ASSERT(root_blk
->magic
== XFS_DA_NODE_MAGIC
);
1061 oldroot
= root_blk
->bp
->b_addr
;
1062 dp
->d_ops
->node_hdr_from_disk(&oldroothdr
, oldroot
);
1063 ASSERT(oldroothdr
.forw
== 0);
1064 ASSERT(oldroothdr
.back
== 0);
1067 * If the root has more than one child, then don't do anything.
1069 if (oldroothdr
.count
> 1)
1073 * Read in the (only) child block, then copy those bytes into
1074 * the root block's buffer and free the original child block.
1076 btree
= dp
->d_ops
->node_tree_p(oldroot
);
1077 child
= be32_to_cpu(btree
[0].before
);
1079 error
= xfs_da3_node_read(args
->trans
, dp
, child
, -1, &bp
,
1083 xfs_da_blkinfo_onlychild_validate(bp
->b_addr
, oldroothdr
.level
);
1086 * This could be copying a leaf back into the root block in the case of
1087 * there only being a single leaf block left in the tree. Hence we have
1088 * to update the b_ops pointer as well to match the buffer type change
1089 * that could occur. For dir3 blocks we also need to update the block
1090 * number in the buffer header.
1092 memcpy(root_blk
->bp
->b_addr
, bp
->b_addr
, args
->geo
->blksize
);
1093 root_blk
->bp
->b_ops
= bp
->b_ops
;
1094 xfs_trans_buf_copy_type(root_blk
->bp
, bp
);
1095 if (oldroothdr
.magic
== XFS_DA3_NODE_MAGIC
) {
1096 struct xfs_da3_blkinfo
*da3
= root_blk
->bp
->b_addr
;
1097 da3
->blkno
= cpu_to_be64(root_blk
->bp
->b_bn
);
1099 xfs_trans_log_buf(args
->trans
, root_blk
->bp
, 0,
1100 args
->geo
->blksize
- 1);
1101 error
= xfs_da_shrink_inode(args
, child
, bp
);
1106 * Check a node block and its neighbors to see if the block should be
1107 * collapsed into one or the other neighbor. Always keep the block
1108 * with the smaller block number.
1109 * If the current block is over 50% full, don't try to join it, return 0.
1110 * If the block is empty, fill in the state structure and return 2.
1111 * If it can be collapsed, fill in the state structure and return 1.
1112 * If nothing can be done, return 0.
1115 xfs_da3_node_toosmall(
1116 struct xfs_da_state
*state
,
1119 struct xfs_da_intnode
*node
;
1120 struct xfs_da_state_blk
*blk
;
1121 struct xfs_da_blkinfo
*info
;
1124 struct xfs_da3_icnode_hdr nodehdr
;
1130 struct xfs_inode
*dp
= state
->args
->dp
;
1132 trace_xfs_da_node_toosmall(state
->args
);
1135 * Check for the degenerate case of the block being over 50% full.
1136 * If so, it's not worth even looking to see if we might be able
1137 * to coalesce with a sibling.
1139 blk
= &state
->path
.blk
[ state
->path
.active
-1 ];
1140 info
= blk
->bp
->b_addr
;
1141 node
= (xfs_da_intnode_t
*)info
;
1142 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1143 if (nodehdr
.count
> (state
->args
->geo
->node_ents
>> 1)) {
1144 *action
= 0; /* blk over 50%, don't try to join */
1145 return(0); /* blk over 50%, don't try to join */
1149 * Check for the degenerate case of the block being empty.
1150 * If the block is empty, we'll simply delete it, no need to
1151 * coalesce it with a sibling block. We choose (arbitrarily)
1152 * to merge with the forward block unless it is NULL.
1154 if (nodehdr
.count
== 0) {
1156 * Make altpath point to the block we want to keep and
1157 * path point to the block we want to drop (this one).
1159 forward
= (info
->forw
!= 0);
1160 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1161 error
= xfs_da3_path_shift(state
, &state
->altpath
, forward
,
1174 * Examine each sibling block to see if we can coalesce with
1175 * at least 25% free space to spare. We need to figure out
1176 * whether to merge with the forward or the backward block.
1177 * We prefer coalescing with the lower numbered sibling so as
1178 * to shrink a directory over time.
1180 count
= state
->args
->geo
->node_ents
;
1181 count
-= state
->args
->geo
->node_ents
>> 2;
1182 count
-= nodehdr
.count
;
1184 /* start with smaller blk num */
1185 forward
= nodehdr
.forw
< nodehdr
.back
;
1186 for (i
= 0; i
< 2; forward
= !forward
, i
++) {
1187 struct xfs_da3_icnode_hdr thdr
;
1189 blkno
= nodehdr
.forw
;
1191 blkno
= nodehdr
.back
;
1194 error
= xfs_da3_node_read(state
->args
->trans
, dp
,
1195 blkno
, -1, &bp
, state
->args
->whichfork
);
1200 dp
->d_ops
->node_hdr_from_disk(&thdr
, node
);
1201 xfs_trans_brelse(state
->args
->trans
, bp
);
1203 if (count
- thdr
.count
>= 0)
1204 break; /* fits with at least 25% to spare */
1212 * Make altpath point to the block we want to keep (the lower
1213 * numbered block) and path point to the block we want to drop.
1215 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1216 if (blkno
< blk
->blkno
) {
1217 error
= xfs_da3_path_shift(state
, &state
->altpath
, forward
,
1220 error
= xfs_da3_path_shift(state
, &state
->path
, forward
,

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
1257 * Walk back up the tree adjusting hash values as necessary,
1258 * when we stop making changes, return.
1261 xfs_da3_fixhashpath(
1262 struct xfs_da_state
*state
,
1263 struct xfs_da_state_path
*path
)
1265 struct xfs_da_state_blk
*blk
;
1266 struct xfs_da_intnode
*node
;
1267 struct xfs_da_node_entry
*btree
;
1268 xfs_dahash_t lasthash
=0;
1271 struct xfs_inode
*dp
= state
->args
->dp
;
1273 trace_xfs_da_fixhashpath(state
->args
);
1275 level
= path
->active
-1;
1276 blk
= &path
->blk
[ level
];
1277 switch (blk
->magic
) {
1278 case XFS_ATTR_LEAF_MAGIC
:
1279 lasthash
= xfs_attr_leaf_lasthash(blk
->bp
, &count
);
1283 case XFS_DIR2_LEAFN_MAGIC
:
1284 lasthash
= xfs_dir2_leafn_lasthash(dp
, blk
->bp
, &count
);
1288 case XFS_DA_NODE_MAGIC
:
1289 lasthash
= xfs_da3_node_lasthash(dp
, blk
->bp
, &count
);
1294 for (blk
--, level
--; level
>= 0; blk
--, level
--) {
1295 struct xfs_da3_icnode_hdr nodehdr
;
1297 node
= blk
->bp
->b_addr
;
1298 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1299 btree
= dp
->d_ops
->node_tree_p(node
);
1300 if (be32_to_cpu(btree
[blk
->index
].hashval
) == lasthash
)
1302 blk
->hashval
= lasthash
;
1303 btree
[blk
->index
].hashval
= cpu_to_be32(lasthash
);
1304 xfs_trans_log_buf(state
->args
->trans
, blk
->bp
,
1305 XFS_DA_LOGRANGE(node
, &btree
[blk
->index
],
1308 lasthash
= be32_to_cpu(btree
[nodehdr
.count
- 1].hashval
);
1313 * Remove an entry from an intermediate node.
1316 xfs_da3_node_remove(
1317 struct xfs_da_state
*state
,
1318 struct xfs_da_state_blk
*drop_blk
)
1320 struct xfs_da_intnode
*node
;
1321 struct xfs_da3_icnode_hdr nodehdr
;
1322 struct xfs_da_node_entry
*btree
;
1325 struct xfs_inode
*dp
= state
->args
->dp
;
1327 trace_xfs_da_node_remove(state
->args
);
1329 node
= drop_blk
->bp
->b_addr
;
1330 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1331 ASSERT(drop_blk
->index
< nodehdr
.count
);
1332 ASSERT(drop_blk
->index
>= 0);
1335 * Copy over the offending entry, or just zero it out.
1337 index
= drop_blk
->index
;
1338 btree
= dp
->d_ops
->node_tree_p(node
);
1339 if (index
< nodehdr
.count
- 1) {
1340 tmp
= nodehdr
.count
- index
- 1;
1341 tmp
*= (uint
)sizeof(xfs_da_node_entry_t
);
1342 memmove(&btree
[index
], &btree
[index
+ 1], tmp
);
1343 xfs_trans_log_buf(state
->args
->trans
, drop_blk
->bp
,
1344 XFS_DA_LOGRANGE(node
, &btree
[index
], tmp
));
1345 index
= nodehdr
.count
- 1;
1347 memset(&btree
[index
], 0, sizeof(xfs_da_node_entry_t
));
1348 xfs_trans_log_buf(state
->args
->trans
, drop_blk
->bp
,
1349 XFS_DA_LOGRANGE(node
, &btree
[index
], sizeof(btree
[index
])));
1351 dp
->d_ops
->node_hdr_to_disk(node
, &nodehdr
);
1352 xfs_trans_log_buf(state
->args
->trans
, drop_blk
->bp
,
1353 XFS_DA_LOGRANGE(node
, &node
->hdr
, dp
->d_ops
->node_hdr_size
));
1356 * Copy the last hash value from the block to propagate upwards.
1358 drop_blk
->hashval
= be32_to_cpu(btree
[index
- 1].hashval
);
1362 * Unbalance the elements between two intermediate nodes,
1363 * move all Btree elements from one node into another.
1366 xfs_da3_node_unbalance(
1367 struct xfs_da_state
*state
,
1368 struct xfs_da_state_blk
*drop_blk
,
1369 struct xfs_da_state_blk
*save_blk
)
1371 struct xfs_da_intnode
*drop_node
;
1372 struct xfs_da_intnode
*save_node
;
1373 struct xfs_da_node_entry
*drop_btree
;
1374 struct xfs_da_node_entry
*save_btree
;
1375 struct xfs_da3_icnode_hdr drop_hdr
;
1376 struct xfs_da3_icnode_hdr save_hdr
;
1377 struct xfs_trans
*tp
;
1380 struct xfs_inode
*dp
= state
->args
->dp
;
1382 trace_xfs_da_node_unbalance(state
->args
);
1384 drop_node
= drop_blk
->bp
->b_addr
;
1385 save_node
= save_blk
->bp
->b_addr
;
1386 dp
->d_ops
->node_hdr_from_disk(&drop_hdr
, drop_node
);
1387 dp
->d_ops
->node_hdr_from_disk(&save_hdr
, save_node
);
1388 drop_btree
= dp
->d_ops
->node_tree_p(drop_node
);
1389 save_btree
= dp
->d_ops
->node_tree_p(save_node
);
1390 tp
= state
->args
->trans
;
1393 * If the dying block has lower hashvals, then move all the
1394 * elements in the remaining block up to make a hole.
1396 if ((be32_to_cpu(drop_btree
[0].hashval
) <
1397 be32_to_cpu(save_btree
[0].hashval
)) ||
1398 (be32_to_cpu(drop_btree
[drop_hdr
.count
- 1].hashval
) <
1399 be32_to_cpu(save_btree
[save_hdr
.count
- 1].hashval
))) {
1400 /* XXX: check this - is memmove dst correct? */
1401 tmp
= save_hdr
.count
* sizeof(xfs_da_node_entry_t
);
1402 memmove(&save_btree
[drop_hdr
.count
], &save_btree
[0], tmp
);
1405 xfs_trans_log_buf(tp
, save_blk
->bp
,
1406 XFS_DA_LOGRANGE(save_node
, &save_btree
[0],
1407 (save_hdr
.count
+ drop_hdr
.count
) *
1408 sizeof(xfs_da_node_entry_t
)));
1410 sindex
= save_hdr
.count
;
1411 xfs_trans_log_buf(tp
, save_blk
->bp
,
1412 XFS_DA_LOGRANGE(save_node
, &save_btree
[sindex
],
1413 drop_hdr
.count
* sizeof(xfs_da_node_entry_t
)));
1417 * Move all the B-tree elements from drop_blk to save_blk.
1419 tmp
= drop_hdr
.count
* (uint
)sizeof(xfs_da_node_entry_t
);
1420 memcpy(&save_btree
[sindex
], &drop_btree
[0], tmp
);
1421 save_hdr
.count
+= drop_hdr
.count
;
1423 dp
->d_ops
->node_hdr_to_disk(save_node
, &save_hdr
);
1424 xfs_trans_log_buf(tp
, save_blk
->bp
,
1425 XFS_DA_LOGRANGE(save_node
, &save_node
->hdr
,
1426 dp
->d_ops
->node_hdr_size
));
1429 * Save the last hashval in the remaining block for upward propagation.
1431 save_blk
->hashval
= be32_to_cpu(save_btree
[save_hdr
.count
- 1].hashval
);
1434 /*========================================================================
1435 * Routines used for finding things in the Btree.
1436 *========================================================================*/
1439 * Walk down the Btree looking for a particular filename, filling
1440 * in the state structure as we go.
1442 * We will set the state structure to point to each of the elements
1443 * in each of the nodes where either the hashval is or should be.
1445 * We support duplicate hashval's so for each entry in the current
1446 * node that could contain the desired hashval, descend. This is a
1447 * pruned depth-first tree search.
1450 xfs_da3_node_lookup_int(
1451 struct xfs_da_state
*state
,
1454 struct xfs_da_state_blk
*blk
;
1455 struct xfs_da_blkinfo
*curr
;
1456 struct xfs_da_intnode
*node
;
1457 struct xfs_da_node_entry
*btree
;
1458 struct xfs_da3_icnode_hdr nodehdr
;
1459 struct xfs_da_args
*args
;
1461 xfs_dahash_t hashval
;
1462 xfs_dahash_t btreehashval
;
1468 struct xfs_inode
*dp
= state
->args
->dp
;
1473 * Descend thru the B-tree searching each level for the right
1474 * node to use, until the right hashval is found.
1476 blkno
= (args
->whichfork
== XFS_DATA_FORK
)? args
->geo
->leafblk
: 0;
1477 for (blk
= &state
->path
.blk
[0], state
->path
.active
= 1;
1478 state
->path
.active
<= XFS_DA_NODE_MAXDEPTH
;
1479 blk
++, state
->path
.active
++) {
1481 * Read the next node down in the tree.
1484 error
= xfs_da3_node_read(args
->trans
, args
->dp
, blkno
,
1485 -1, &blk
->bp
, args
->whichfork
);
1488 state
->path
.active
--;
1491 curr
= blk
->bp
->b_addr
;
1492 blk
->magic
= be16_to_cpu(curr
->magic
);
1494 if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
||
1495 blk
->magic
== XFS_ATTR3_LEAF_MAGIC
) {
1496 blk
->magic
= XFS_ATTR_LEAF_MAGIC
;
1497 blk
->hashval
= xfs_attr_leaf_lasthash(blk
->bp
, NULL
);
1501 if (blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1502 blk
->magic
== XFS_DIR3_LEAFN_MAGIC
) {
1503 blk
->magic
= XFS_DIR2_LEAFN_MAGIC
;
1504 blk
->hashval
= xfs_dir2_leafn_lasthash(args
->dp
,
1509 blk
->magic
= XFS_DA_NODE_MAGIC
;
1513 * Search an intermediate node for a match.
1515 node
= blk
->bp
->b_addr
;
1516 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1517 btree
= dp
->d_ops
->node_tree_p(node
);
1519 max
= nodehdr
.count
;
1520 blk
->hashval
= be32_to_cpu(btree
[max
- 1].hashval
);
1523 * Binary search. (note: small blocks will skip loop)
1525 probe
= span
= max
/ 2;
1526 hashval
= args
->hashval
;
1529 btreehashval
= be32_to_cpu(btree
[probe
].hashval
);
1530 if (btreehashval
< hashval
)
1532 else if (btreehashval
> hashval
)
1537 ASSERT((probe
>= 0) && (probe
< max
));
1538 ASSERT((span
<= 4) ||
1539 (be32_to_cpu(btree
[probe
].hashval
) == hashval
));
1542 * Since we may have duplicate hashval's, find the first
1543 * matching hashval in the node.
1546 be32_to_cpu(btree
[probe
].hashval
) >= hashval
) {
1549 while (probe
< max
&&
1550 be32_to_cpu(btree
[probe
].hashval
) < hashval
) {
1555 * Pick the right block to descend on.
1558 blk
->index
= max
- 1;
1559 blkno
= be32_to_cpu(btree
[max
- 1].before
);
1562 blkno
= be32_to_cpu(btree
[probe
].before
);
1567 * A leaf block that ends in the hashval that we are interested in
1568 * (final hashval == search hashval) means that the next block may
1569 * contain more entries with the same hashval, shift upward to the
1570 * next leaf and keep searching.
1573 if (blk
->magic
== XFS_DIR2_LEAFN_MAGIC
) {
1574 retval
= xfs_dir2_leafn_lookup_int(blk
->bp
, args
,
1575 &blk
->index
, state
);
1576 } else if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
) {
1577 retval
= xfs_attr3_leaf_lookup_int(blk
->bp
, args
);
1578 blk
->index
= args
->index
;
1579 args
->blkno
= blk
->blkno
;
1582 return XFS_ERROR(EFSCORRUPTED
);
1584 if (((retval
== ENOENT
) || (retval
== ENOATTR
)) &&
1585 (blk
->hashval
== args
->hashval
)) {
1586 error
= xfs_da3_path_shift(state
, &state
->path
, 1, 1,
1592 } else if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
) {
1593 /* path_shift() gives ENOENT */
1594 retval
= XFS_ERROR(ENOATTR
);
1603 /*========================================================================
1605 *========================================================================*/
1608 * Compare two intermediate nodes for "order".
1612 struct xfs_inode
*dp
,
1613 struct xfs_buf
*node1_bp
,
1614 struct xfs_buf
*node2_bp
)
1616 struct xfs_da_intnode
*node1
;
1617 struct xfs_da_intnode
*node2
;
1618 struct xfs_da_node_entry
*btree1
;
1619 struct xfs_da_node_entry
*btree2
;
1620 struct xfs_da3_icnode_hdr node1hdr
;
1621 struct xfs_da3_icnode_hdr node2hdr
;
1623 node1
= node1_bp
->b_addr
;
1624 node2
= node2_bp
->b_addr
;
1625 dp
->d_ops
->node_hdr_from_disk(&node1hdr
, node1
);
1626 dp
->d_ops
->node_hdr_from_disk(&node2hdr
, node2
);
1627 btree1
= dp
->d_ops
->node_tree_p(node1
);
1628 btree2
= dp
->d_ops
->node_tree_p(node2
);
1630 if (node1hdr
.count
> 0 && node2hdr
.count
> 0 &&
1631 ((be32_to_cpu(btree2
[0].hashval
) < be32_to_cpu(btree1
[0].hashval
)) ||
1632 (be32_to_cpu(btree2
[node2hdr
.count
- 1].hashval
) <
1633 be32_to_cpu(btree1
[node1hdr
.count
- 1].hashval
)))) {
1640 * Link a new block into a doubly linked list of blocks (of whatever type).
1644 struct xfs_da_state
*state
,
1645 struct xfs_da_state_blk
*old_blk
,
1646 struct xfs_da_state_blk
*new_blk
)
1648 struct xfs_da_blkinfo
*old_info
;
1649 struct xfs_da_blkinfo
*new_info
;
1650 struct xfs_da_blkinfo
*tmp_info
;
1651 struct xfs_da_args
*args
;
1655 struct xfs_inode
*dp
= state
->args
->dp
;
1658 * Set up environment.
1661 ASSERT(args
!= NULL
);
1662 old_info
= old_blk
->bp
->b_addr
;
1663 new_info
= new_blk
->bp
->b_addr
;
1664 ASSERT(old_blk
->magic
== XFS_DA_NODE_MAGIC
||
1665 old_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1666 old_blk
->magic
== XFS_ATTR_LEAF_MAGIC
);
1668 switch (old_blk
->magic
) {
1669 case XFS_ATTR_LEAF_MAGIC
:
1670 before
= xfs_attr_leaf_order(old_blk
->bp
, new_blk
->bp
);
1672 case XFS_DIR2_LEAFN_MAGIC
:
1673 before
= xfs_dir2_leafn_order(dp
, old_blk
->bp
, new_blk
->bp
);
1675 case XFS_DA_NODE_MAGIC
:
1676 before
= xfs_da3_node_order(dp
, old_blk
->bp
, new_blk
->bp
);
1681 * Link blocks in appropriate order.
1685 * Link new block in before existing block.
1687 trace_xfs_da_link_before(args
);
1688 new_info
->forw
= cpu_to_be32(old_blk
->blkno
);
1689 new_info
->back
= old_info
->back
;
1690 if (old_info
->back
) {
1691 error
= xfs_da3_node_read(args
->trans
, dp
,
1692 be32_to_cpu(old_info
->back
),
1693 -1, &bp
, args
->whichfork
);
1697 tmp_info
= bp
->b_addr
;
1698 ASSERT(tmp_info
->magic
== old_info
->magic
);
1699 ASSERT(be32_to_cpu(tmp_info
->forw
) == old_blk
->blkno
);
1700 tmp_info
->forw
= cpu_to_be32(new_blk
->blkno
);
1701 xfs_trans_log_buf(args
->trans
, bp
, 0, sizeof(*tmp_info
)-1);
1703 old_info
->back
= cpu_to_be32(new_blk
->blkno
);
1706 * Link new block in after existing block.
1708 trace_xfs_da_link_after(args
);
1709 new_info
->forw
= old_info
->forw
;
1710 new_info
->back
= cpu_to_be32(old_blk
->blkno
);
1711 if (old_info
->forw
) {
1712 error
= xfs_da3_node_read(args
->trans
, dp
,
1713 be32_to_cpu(old_info
->forw
),
1714 -1, &bp
, args
->whichfork
);
1718 tmp_info
= bp
->b_addr
;
1719 ASSERT(tmp_info
->magic
== old_info
->magic
);
1720 ASSERT(be32_to_cpu(tmp_info
->back
) == old_blk
->blkno
);
1721 tmp_info
->back
= cpu_to_be32(new_blk
->blkno
);
1722 xfs_trans_log_buf(args
->trans
, bp
, 0, sizeof(*tmp_info
)-1);
1724 old_info
->forw
= cpu_to_be32(new_blk
->blkno
);
1727 xfs_trans_log_buf(args
->trans
, old_blk
->bp
, 0, sizeof(*tmp_info
) - 1);
1728 xfs_trans_log_buf(args
->trans
, new_blk
->bp
, 0, sizeof(*tmp_info
) - 1);
1733 * Unlink a block from a doubly linked list of blocks.
1735 STATIC
int /* error */
1737 struct xfs_da_state
*state
,
1738 struct xfs_da_state_blk
*drop_blk
,
1739 struct xfs_da_state_blk
*save_blk
)
1741 struct xfs_da_blkinfo
*drop_info
;
1742 struct xfs_da_blkinfo
*save_info
;
1743 struct xfs_da_blkinfo
*tmp_info
;
1744 struct xfs_da_args
*args
;
1749 * Set up environment.
1752 ASSERT(args
!= NULL
);
1753 save_info
= save_blk
->bp
->b_addr
;
1754 drop_info
= drop_blk
->bp
->b_addr
;
1755 ASSERT(save_blk
->magic
== XFS_DA_NODE_MAGIC
||
1756 save_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1757 save_blk
->magic
== XFS_ATTR_LEAF_MAGIC
);
1758 ASSERT(save_blk
->magic
== drop_blk
->magic
);
1759 ASSERT((be32_to_cpu(save_info
->forw
) == drop_blk
->blkno
) ||
1760 (be32_to_cpu(save_info
->back
) == drop_blk
->blkno
));
1761 ASSERT((be32_to_cpu(drop_info
->forw
) == save_blk
->blkno
) ||
1762 (be32_to_cpu(drop_info
->back
) == save_blk
->blkno
));
1765 * Unlink the leaf block from the doubly linked chain of leaves.
1767 if (be32_to_cpu(save_info
->back
) == drop_blk
->blkno
) {
1768 trace_xfs_da_unlink_back(args
);
1769 save_info
->back
= drop_info
->back
;
1770 if (drop_info
->back
) {
1771 error
= xfs_da3_node_read(args
->trans
, args
->dp
,
1772 be32_to_cpu(drop_info
->back
),
1773 -1, &bp
, args
->whichfork
);
1777 tmp_info
= bp
->b_addr
;
1778 ASSERT(tmp_info
->magic
== save_info
->magic
);
1779 ASSERT(be32_to_cpu(tmp_info
->forw
) == drop_blk
->blkno
);
1780 tmp_info
->forw
= cpu_to_be32(save_blk
->blkno
);
1781 xfs_trans_log_buf(args
->trans
, bp
, 0,
1782 sizeof(*tmp_info
) - 1);
1785 trace_xfs_da_unlink_forward(args
);
1786 save_info
->forw
= drop_info
->forw
;
1787 if (drop_info
->forw
) {
1788 error
= xfs_da3_node_read(args
->trans
, args
->dp
,
1789 be32_to_cpu(drop_info
->forw
),
1790 -1, &bp
, args
->whichfork
);
1794 tmp_info
= bp
->b_addr
;
1795 ASSERT(tmp_info
->magic
== save_info
->magic
);
1796 ASSERT(be32_to_cpu(tmp_info
->back
) == drop_blk
->blkno
);
1797 tmp_info
->back
= cpu_to_be32(save_blk
->blkno
);
1798 xfs_trans_log_buf(args
->trans
, bp
, 0,
1799 sizeof(*tmp_info
) - 1);
1803 xfs_trans_log_buf(args
->trans
, save_blk
->bp
, 0, sizeof(*save_info
) - 1);
1808 * Move a path "forward" or "!forward" one block at the current level.
1810 * This routine will adjust a "path" to point to the next block
1811 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1812 * Btree, including updating pointers to the intermediate nodes between
1813 * the new bottom and the root.
1817 struct xfs_da_state
*state
,
1818 struct xfs_da_state_path
*path
,
1823 struct xfs_da_state_blk
*blk
;
1824 struct xfs_da_blkinfo
*info
;
1825 struct xfs_da_intnode
*node
;
1826 struct xfs_da_args
*args
;
1827 struct xfs_da_node_entry
*btree
;
1828 struct xfs_da3_icnode_hdr nodehdr
;
1829 xfs_dablk_t blkno
= 0;
1832 struct xfs_inode
*dp
= state
->args
->dp
;
1834 trace_xfs_da_path_shift(state
->args
);
1837 * Roll up the Btree looking for the first block where our
1838 * current index is not at the edge of the block. Note that
1839 * we skip the bottom layer because we want the sibling block.
1842 ASSERT(args
!= NULL
);
1843 ASSERT(path
!= NULL
);
1844 ASSERT((path
->active
> 0) && (path
->active
< XFS_DA_NODE_MAXDEPTH
));
1845 level
= (path
->active
-1) - 1; /* skip bottom layer in path */
1846 for (blk
= &path
->blk
[level
]; level
>= 0; blk
--, level
--) {
1847 node
= blk
->bp
->b_addr
;
1848 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1849 btree
= dp
->d_ops
->node_tree_p(node
);
1851 if (forward
&& (blk
->index
< nodehdr
.count
- 1)) {
1853 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1855 } else if (!forward
&& (blk
->index
> 0)) {
1857 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1862 *result
= XFS_ERROR(ENOENT
); /* we're out of our tree */
1863 ASSERT(args
->op_flags
& XFS_DA_OP_OKNOENT
);
1868 * Roll down the edge of the subtree until we reach the
1869 * same depth we were at originally.
1871 for (blk
++, level
++; level
< path
->active
; blk
++, level
++) {
1873 * Release the old block.
1874 * (if it's dirty, trans won't actually let go)
1877 xfs_trans_brelse(args
->trans
, blk
->bp
);
1880 * Read the next child block.
1883 error
= xfs_da3_node_read(args
->trans
, dp
, blkno
, -1,
1884 &blk
->bp
, args
->whichfork
);
1887 info
= blk
->bp
->b_addr
;
1888 ASSERT(info
->magic
== cpu_to_be16(XFS_DA_NODE_MAGIC
) ||
1889 info
->magic
== cpu_to_be16(XFS_DA3_NODE_MAGIC
) ||
1890 info
->magic
== cpu_to_be16(XFS_DIR2_LEAFN_MAGIC
) ||
1891 info
->magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
) ||
1892 info
->magic
== cpu_to_be16(XFS_ATTR_LEAF_MAGIC
) ||
1893 info
->magic
== cpu_to_be16(XFS_ATTR3_LEAF_MAGIC
));
1897 * Note: we flatten the magic number to a single type so we
1898 * don't have to compare against crc/non-crc types elsewhere.
1900 switch (be16_to_cpu(info
->magic
)) {
1901 case XFS_DA_NODE_MAGIC
:
1902 case XFS_DA3_NODE_MAGIC
:
1903 blk
->magic
= XFS_DA_NODE_MAGIC
;
1904 node
= (xfs_da_intnode_t
*)info
;
1905 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1906 btree
= dp
->d_ops
->node_tree_p(node
);
1907 blk
->hashval
= be32_to_cpu(btree
[nodehdr
.count
- 1].hashval
);
1911 blk
->index
= nodehdr
.count
- 1;
1912 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1914 case XFS_ATTR_LEAF_MAGIC
:
1915 case XFS_ATTR3_LEAF_MAGIC
:
1916 blk
->magic
= XFS_ATTR_LEAF_MAGIC
;
1917 ASSERT(level
== path
->active
-1);
1919 blk
->hashval
= xfs_attr_leaf_lasthash(blk
->bp
, NULL
);
1921 case XFS_DIR2_LEAFN_MAGIC
:
1922 case XFS_DIR3_LEAFN_MAGIC
:
1923 blk
->magic
= XFS_DIR2_LEAFN_MAGIC
;
1924 ASSERT(level
== path
->active
-1);
1926 blk
->hashval
= xfs_dir2_leafn_lasthash(args
->dp
,

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
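
/*
 * For example, a two byte name such as "ab" never enters the unrolled loop,
 * so the result is simply
 *	('a' << 7) ^ ('b' << 0) ^ rol32(0, 7 * 2) = 0x3080 ^ 0x62 = 0x30e2.
 */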

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

STATIC xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
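
/*
 * xfs_default_nameops bundles the two helpers above so that directory code
 * can resolve names through an ops table rather than calling
 * xfs_da_hashname()/xfs_da_compname() directly, letting the hash and
 * comparison policy be swapped out as a pair.
 */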
1999 xfs_da_grow_inode_int(
2000 struct xfs_da_args
*args
,
2004 struct xfs_trans
*tp
= args
->trans
;
2005 struct xfs_inode
*dp
= args
->dp
;
2006 int w
= args
->whichfork
;
2007 xfs_drfsbno_t nblks
= dp
->i_d
.di_nblocks
;
2008 struct xfs_bmbt_irec map
, *mapp
;
2009 int nmap
, error
, got
, i
, mapi
;
2012 * Find a spot in the file space to put the new block.
2014 error
= xfs_bmap_first_unused(tp
, dp
, count
, bno
, w
);
2019 * Try mapping it in one filesystem block.
2022 ASSERT(args
->firstblock
!= NULL
);
2023 error
= xfs_bmapi_write(tp
, dp
, *bno
, count
,
2024 xfs_bmapi_aflag(w
)|XFS_BMAPI_METADATA
|XFS_BMAPI_CONTIG
,
2025 args
->firstblock
, args
->total
, &map
, &nmap
,
2034 } else if (nmap
== 0 && count
> 1) {
2039 * If we didn't get it and the block might work if fragmented,
2040 * try without the CONTIG flag. Loop until we get it all.
2042 mapp
= kmem_alloc(sizeof(*mapp
) * count
, KM_SLEEP
);
2043 for (b
= *bno
, mapi
= 0; b
< *bno
+ count
; ) {
2044 nmap
= MIN(XFS_BMAP_MAX_NMAP
, count
);
2045 c
= (int)(*bno
+ count
- b
);
2046 error
= xfs_bmapi_write(tp
, dp
, b
, c
,
2047 xfs_bmapi_aflag(w
)|XFS_BMAPI_METADATA
,
2048 args
->firstblock
, args
->total
,
2049 &mapp
[mapi
], &nmap
, args
->flist
);
2055 b
= mapp
[mapi
- 1].br_startoff
+
2056 mapp
[mapi
- 1].br_blockcount
;
2064 * Count the blocks we got, make sure it matches the total.
2066 for (i
= 0, got
= 0; i
< mapi
; i
++)
2067 got
+= mapp
[i
].br_blockcount
;
2068 if (got
!= count
|| mapp
[0].br_startoff
!= *bno
||
2069 mapp
[mapi
- 1].br_startoff
+ mapp
[mapi
- 1].br_blockcount
!=
2071 error
= XFS_ERROR(ENOSPC
);
2075 /* account for newly allocated blocks in reserved blocks total */
2076 args
->total
-= dp
->i_d
.di_nblocks
- nblks
;
2085 * Add a block to the btree ahead of the file.
2086 * Return the new block number to the caller.
2090 struct xfs_da_args
*args
,
2091 xfs_dablk_t
*new_blkno
)
2096 trace_xfs_da_grow_inode(args
);
2098 bno
= args
->geo
->leafblk
;
2099 error
= xfs_da_grow_inode_int(args
, &bno
, args
->geo
->fsbcount
);
2101 *new_blkno
= (xfs_dablk_t
)bno
;
2106 * Ick. We need to always be able to remove a btree block, even
2107 * if there's no space reservation because the filesystem is full.
2108 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2109 * It swaps the target block with the last block in the file. The
2110 * last block in the file can always be removed since it can't cause
2111 * a bmap btree split to do that.
2114 xfs_da3_swap_lastblock(
2115 struct xfs_da_args
*args
,
2116 xfs_dablk_t
*dead_blknop
,
2117 struct xfs_buf
**dead_bufp
)
2119 struct xfs_da_blkinfo
*dead_info
;
2120 struct xfs_da_blkinfo
*sib_info
;
2121 struct xfs_da_intnode
*par_node
;
2122 struct xfs_da_intnode
*dead_node
;
2123 struct xfs_dir2_leaf
*dead_leaf2
;
2124 struct xfs_da_node_entry
*btree
;
2125 struct xfs_da3_icnode_hdr par_hdr
;
2126 struct xfs_inode
*dp
;
2127 struct xfs_trans
*tp
;
2128 struct xfs_mount
*mp
;
2129 struct xfs_buf
*dead_buf
;
2130 struct xfs_buf
*last_buf
;
2131 struct xfs_buf
*sib_buf
;
2132 struct xfs_buf
*par_buf
;
2133 xfs_dahash_t dead_hash
;
2134 xfs_fileoff_t lastoff
;
2135 xfs_dablk_t dead_blkno
;
2136 xfs_dablk_t last_blkno
;
2137 xfs_dablk_t sib_blkno
;
2138 xfs_dablk_t par_blkno
;
2145 trace_xfs_da_swap_lastblock(args
);
2147 dead_buf
= *dead_bufp
;
2148 dead_blkno
= *dead_blknop
;
2151 w
= args
->whichfork
;
2152 ASSERT(w
== XFS_DATA_FORK
);
2154 lastoff
= args
->geo
->freeblk
;
2155 error
= xfs_bmap_last_before(tp
, dp
, &lastoff
, w
);
2158 if (unlikely(lastoff
== 0)) {
2159 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW
,
2161 return XFS_ERROR(EFSCORRUPTED
);
2164 * Read the last block in the btree space.
2166 last_blkno
= (xfs_dablk_t
)lastoff
- args
->geo
->fsbcount
;
2167 error
= xfs_da3_node_read(tp
, dp
, last_blkno
, -1, &last_buf
, w
);
2171 * Copy the last block into the dead buffer and log it.
2173 memcpy(dead_buf
->b_addr
, last_buf
->b_addr
, args
->geo
->blksize
);
2174 xfs_trans_log_buf(tp
, dead_buf
, 0, args
->geo
->blksize
- 1);
2175 dead_info
= dead_buf
->b_addr
;
2177 * Get values from the moved block.
2179 if (dead_info
->magic
== cpu_to_be16(XFS_DIR2_LEAFN_MAGIC
) ||
2180 dead_info
->magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
)) {
2181 struct xfs_dir3_icleaf_hdr leafhdr
;
2182 struct xfs_dir2_leaf_entry
*ents
;
2184 dead_leaf2
= (xfs_dir2_leaf_t
*)dead_info
;
2185 dp
->d_ops
->leaf_hdr_from_disk(&leafhdr
, dead_leaf2
);
2186 ents
= dp
->d_ops
->leaf_ents_p(dead_leaf2
);
2188 dead_hash
= be32_to_cpu(ents
[leafhdr
.count
- 1].hashval
);
2190 struct xfs_da3_icnode_hdr deadhdr
;
2192 dead_node
= (xfs_da_intnode_t
*)dead_info
;
2193 dp
->d_ops
->node_hdr_from_disk(&deadhdr
, dead_node
);
2194 btree
= dp
->d_ops
->node_tree_p(dead_node
);
2195 dead_level
= deadhdr
.level
;
2196 dead_hash
= be32_to_cpu(btree
[deadhdr
.count
- 1].hashval
);
2198 sib_buf
= par_buf
= NULL
;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t	*dp;
	int		done, error, w, count;
	xfs_trans_t	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				    0, args->firstblock, args->flist, &done);
		if (error == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}

	return 0;
}
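/*
 * Illustrative sketch, not part of the original source: the single-map
 * convention described above means callers point *mapp at an on-stack
 * xfs_buf_map and set *nmaps to 1 before calling in; when more than one
 * irec is converted, the function swaps in a kmem-allocated array which
 * the caller frees if it differs from the stack map (the same pattern
 * the buffer helpers below use).  Variable names here are hypothetical.
 *
 *	struct xfs_buf_map	map;
 *	struct xfs_buf_map	*mapp = &map;
 *	int			nmap = 1;
 *
 *	error = xfs_buf_map_from_irec(mp, &mapp, &nmap, irecs, nirecs);
 *	...
 *	if (mapp != &map)
 *		kmem_free(mapp);
 */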
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
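/*
 * Illustrative sketch, not part of the original source: how the buffer
 * helpers below interpret the three return values documented above.
 * A hole with mappedbno == -2 (return -1) is squashed to "no buffer,
 * no error"; 0 means the mapping is valid; any positive value is a real
 * error to be propagated.
 *
 *	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork, &mapp, &nmap);
 *	if (error) {
 *		// mapping a hole is not an error, but we don't continue
 *		if (error == -1)
 *			error = 0;
 *		goto out_free;
 *	}
 */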
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return mappedbno;
}
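/*
 * Illustrative sketch, not part of the original source: a caller walking
 * directory blocks can prime the buffer cache with xfs_da_reada_buf()
 * ahead of the blocking read.  "next_bno" is a hypothetical local, and
 * the verifier shown is just one plausible choice for a data-fork block.
 *
 *	xfs_daddr_t	mapped;
 *
 *	mapped = xfs_da_reada_buf(dp, next_bno, -1, XFS_DATA_FORK,
 *				  &xfs_dir3_data_buf_ops);
 *	// the returned daddr can be handed to a later xfs_da_read_buf()
 *	// as mappedbno so the block does not have to be remapped
 */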