/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *node_blk_1,
					 xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
					      xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
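
/*
 * Note on the state structures used throughout this file (summary, not
 * part of the original source): state->path records the chain of blocks
 * from the root down to the current leaf, and state->altpath is a
 * parallel path used while splitting or joining.  state->extrablk
 * (guarded by state->extravalid) carries the third block produced by a
 * double split of an attribute leaf.
 */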
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/
/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);

	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	*bpp = bp;
	return(0);
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */

	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}
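
/*
 * Worked example of the loop above (illustrative note, not from the
 * original source): if the bottom attr leaf is full,
 * xfs_attr_leaf_split() creates a sibling; that sibling must then be
 * recorded in the parent via xfs_da_node_add().  If the parent node is
 * itself full, xfs_da_node_split() splits it too, and the walk keeps
 * climbing until either a node absorbs the new child (addblk == NULL)
 * or the root itself must be split by xfs_da_root_split().
 */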
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}
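
/*
 * Why the copy-out above (illustrative note): the root must stay at a
 * fixed block (m_dirleafblk for directories, block 0 for attributes)
 * because callers look for it there.  So the old root's contents are
 * moved to a newly grown block, and the fixed block is rebuilt as a
 * two-entry node pointing at the two halves.
 */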
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
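
/*
 * Example of the index adjustment above (illustrative note): with
 * entries at indexes 0..3 and a lookup that left index == 2, the new
 * sibling carries hashvals that FOLLOW entry 2, so the index is bumped
 * to 3 before xfs_da_node_add() (which inserts before the given index)
 * is called.
 */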
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
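
/*
 * Example of the balancing arithmetic above (illustrative note): with
 * node1 holding 10 entries and node2 empty, count = (10 - 0) / 2 = 5,
 * so the upper five entries of node1 are copied to the front of node2,
 * exactly the "blk2 gets the upper half of blk1" case called out in
 * the function comment.
 */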
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/
/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	xfs_da_blkinfo_t *blkinfo;
	xfs_da_args_t *args;
	int error;

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	blkinfo = bp->data;
	if (be16_to_cpu(oldroot->hdr.level) == 1) {
		ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else {
		ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}
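
/*
 * Example of the thresholds above (derived from the code): with
 * state->node_ents == 64, a node with more than 32 entries is left
 * alone (over 50% full), and a merge is attempted only when the
 * combined entry count of the node and a sibling fits within
 * 64 - (64 >> 2) = 48 entries, i.e. the merged block keeps at least
 * 25% free space.
 */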
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
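
/*
 * Example of the duplicate-hashval handling above (illustrative note):
 * with node hashvals { 10, 20, 20, 20, 30 } and a search value of 20,
 * the binary search may land on any of the three 20s; the two linear
 * loops then rewind to the first entry whose hashval >= 20 so every
 * candidate subtree is visited in order.
 */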
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
						     &blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
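
/*
 * Illustrative note on the shift above: moving "forward" climbs until
 * some ancestor index can be incremented, then descends along the
 * leftmost (index 0) edge of the new subtree, so the path lands on the
 * block holding the next higher hashvals; "!forward" mirrors this on
 * the rightmost edge.
 */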
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
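
/*
 * Illustrative trace of the unrolling above: for a six-byte name the
 * loop consumes bytes 0-3 in one step with rol32(hash, 7 * 4), and the
 * trailing switch then folds in the last two bytes with
 * rol32(hash, 7 * 2), matching one 7-bit rotate per character overall.
 */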
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
			XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
					XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
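
/*
 * Note on the two-pass mapping strategy above (illustrative): the
 * first xfs_bmapi() call asks for the whole range with
 * XFS_BMAPI_CONTIG; only when that returns no mapping (nmap == 0) for
 * a multi-block request does the code fall back to the fragmented
 * loop, accepting up to "count" separate extents that remain logically
 * contiguous in file space.
 */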
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
		      xfs_dabuf_t **dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->data;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_da_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	xfs_da_buf_done(par_buf);
	xfs_da_buf_done(dead_buf);
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_da_brelse(tp, par_buf);
	if (sib_buf)
		xfs_da_brelse(tp, sib_buf);
	xfs_da_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
/*
 * Make a dabuf.
 * Used for get_buf, read_buf, read_bufr, and reada_buf.
 */
STATIC int
xfs_da_do_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	*mappedbnop,
	xfs_dabuf_t	**bpp,
	int		whichfork,
	int		caller)
{
	xfs_buf_t	*bp = NULL;
	xfs_buf_t	**bplist;
	int		error=0;
	int		i;
	xfs_bmbt_irec_t	map;
	xfs_bmbt_irec_t	*mapp;
	xfs_daddr_t	mappedbno;
	xfs_mount_t	*mp;
	int		nbplist=0;
	int		nfsb;
	int		nmap;
	xfs_dabuf_t	*rbp;

	mp = dp->i_mount;
	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
	mappedbno = *mappedbnop;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb == 1) {
			xfs_fsblock_t	fsb;

			if ((error =
			    xfs_bmapi_single(trans, dp, whichfork, &fsb,
				    (xfs_fileoff_t)bno))) {
				return error;
			}
			mapp = &map;
			if (fsb == NULLFSBLOCK) {
				nmap = 0;
			} else {
				map.br_startblock = fsb;
				map.br_startoff = (xfs_fileoff_t)bno;
				map.br_blockcount = 1;
				nmap = 1;
			}
		} else {
			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
			nmap = nfsb;
			if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
					nfsb,
					XFS_BMAPI_METADATA |
						xfs_bmapi_aflag(whichfork),
					NULL, 0, mapp, &nmap, NULL)))
				goto exit0;
		}
	} else {
		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		map.br_startoff = (xfs_fileoff_t)bno;
		map.br_blockcount = nfsb;
		mapp = &map;
		nmap = 1;
	}
	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < nmap; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)mapp[i].br_startoff,
						(long long)mapp[i].br_startblock,
						(long long)mapp[i].br_blockcount,
						mapp[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto exit0;
	}
	if (caller != 3 && nmap > 1) {
		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
		nbplist = 0;
	} else
		bplist = NULL;
	/*
	 * Turn the mapping(s) into buffer(s).
	 */
	for (i = 0; i < nmap; i++) {
		int	nmapped;

		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
		if (i == 0)
			*mappedbnop = mappedbno;
		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
		switch (caller) {
		case 0:
			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0);
			error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
			break;
		case 1:
		case 2:
			bp = NULL;
			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
				mappedbno, nmapped, 0, &bp);
			break;
		case 3:
			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
			error = 0;
			bp = NULL;
			break;
		}
		if (error) {
			if (bp)
				xfs_trans_brelse(trans, bp);
			goto exit1;
		}
		if (!bp)
			continue;
		if (caller == 1) {
			if (whichfork == XFS_ATTR_FORK) {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
						XFS_ATTR_BTREE_REF);
			} else {
				XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
						XFS_DIR_BTREE_REF);
			}
		}
		if (bplist) {
			bplist[nbplist++] = bp;
		}
	}
	/*
	 * Build a dabuf structure.
	 */
	if (bplist) {
		rbp = xfs_da_buf_make(nbplist, bplist);
	} else if (bp)
		rbp = xfs_da_buf_make(1, &bp);
	else
		rbp = NULL;
	/*
	 * For read_buf, check the magic number.
	 */
	if (caller == 1) {
		xfs_dir2_data_hdr_t	*hdr = rbp->data;
		xfs_dir2_free_t		*free = rbp->data;
		xfs_da_blkinfo_t	*info = rbp->data;
		uint			magic, magic1;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_da_brelse(trans, rbp);
			nbplist = 0;
			goto exit1;
		}
	}
	if (bplist) {
		kmem_free(bplist);
	}
	if (mapp != &map) {
		kmem_free(mapp);
	}
	if (bpp)
		*bpp = rbp;
	return 0;
exit1:
	if (bplist) {
		for (i = 0; i < nbplist; i++)
			xfs_trans_brelse(trans, bplist[i]);
		kmem_free(bplist);
	}
exit0:
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = NULL;
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t		mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t		mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	int		whichfork)
{
	xfs_daddr_t		rval;

	rval = -1;
	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
		return -1;
	else
		return rval;
}
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */
/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++) {
		if (state->altpath.blk[i].bp) {
			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
				xfs_da_buf_done(state->altpath.blk[i].bp);
			state->altpath.blk[i].bp = NULL;
		}
	}
	state->altpath.active = 0;
}
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	int	i;

	xfs_da_state_kill_altpath(state);
	for (i = 0; i < state->path.active; i++) {
		if (state->path.blk[i].bp)
			xfs_da_buf_done(state->path.blk[i].bp);
	}
	if (state->extravalid && state->extrablk.bp)
		xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
/*
 * Create a dabuf.
 */
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
	dabuf->dirty = 0;
	if (nbuf == 1) {
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
		dabuf->data = XFS_BUF_PTR(bp);
		dabuf->bps[0] = bp;
	} else {
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
				XFS_BUF_COUNT(bp));
		}
	}
	return dabuf;
}
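
/*
 * Why dabufs exist (illustrative note): a directory block may span
 * several filesystem blocks (mp->m_dirblkfsbs > 1), so for the
 * multi-buffer case the constructor above concatenates the underlying
 * buffers into one contiguous data copy; xfs_da_log_buf() marks the
 * dabuf dirty, and xfs_da_buf_clean() copies the bytes back before
 * release.
 */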
/*
 * Un-dirty a dabuf.
 */
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += XFS_BUF_COUNT(bp)) {
			bp = dabuf->bps[i];
			memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
				XFS_BUF_COUNT(bp));
		}
	}
}
/*
 * Release a dabuf.
 */
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf);
	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->dirty)
		xfs_da_buf_clean(dabuf);
	if (dabuf->nbuf > 1) {
		kmem_free(dabuf->data);
		kmem_free(dabuf);
	} else {
		kmem_zone_free(xfs_dabuf_zone, dabuf);
	}
}
/*
 * Log transaction from a dabuf.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
	xfs_buf_t	*bp;
	uint		f;
	int		i;
	uint		l;
	int		off;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->nbuf == 1) {
		ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
		return;
	}
	dabuf->dirty = 1;
	ASSERT(first <= last);
	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
		bp = dabuf->bps[i];
		f = off;
		l = f + XFS_BUF_COUNT(bp) - 1;
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)
			xfs_trans_log_buf(tp, bp, f - off, l - off);
		/*
		 * B_DONE is set by xfs_trans_log buf.
		 * If we don't set it on a new buffer (get not read)
		 * then if we don't put anything in the buffer it won't
		 * be set, and at commit it it released into the cache,
		 * and then a read will fail.
		 */
		else if (!(XFS_BUF_ISDONE(bp)))
			XFS_BUF_DONE(bp);
	}
	ASSERT(last < off);
}
/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_brelse(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_binval(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf);
	ASSERT(dabuf->data);
	return XFS_BUF_ADDR(dabuf->bps[0]);
}