/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_root,
				xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_blk,
				xfs_da_state_blk_t *split_blk,
				xfs_da_state_blk_t *blk_to_add,
				int treelevel,
				int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *node_blk_1,
				xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				xfs_da_state_blk_t *old_node_blk,
				xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
				xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *src_node_blk,
				xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);

	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	*bpp = bp;
	return(0);
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */

	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	/* REFERENCED */
	xfs_da_blkinfo_t *blkinfo;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	blkinfo = bp->data;
	if (be16_to_cpu(oldroot->hdr.level) == 1) {
		ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else {
		ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error) {
			return(error);
		}
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
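
/*
 * Note (inferred from the code below): xfs_da_node_order() returns 1 when
 * the second node's hashvals sort below the first node's; xfs_da_blk_link()
 * uses that result to decide whether the new block is linked in before or
 * after the existing block.
 */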
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
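
/*
 * Worked example of the hash above: the unrolled loop is equivalent to
 * hash = rol32(hash, 7) ^ c for each character c, so the one-character
 * name "a" hashes to simply 'a' (0x61), since rol32(0, 7) == 0.
 */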
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
			XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
					XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
		      xfs_dabuf_t **dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->data;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_da_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	xfs_da_buf_done(par_buf);
	xfs_da_buf_done(dead_buf);
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_da_brelse(tp, par_buf);
	if (sib_buf)
		xfs_da_brelse(tp, sib_buf);
	xfs_da_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}
1904 * See if the mapping(s) for this btree block are valid, i.e.
1905 * don't contain holes, are logically contiguous, and cover the whole range.
1907 STATIC int
1908 xfs_da_map_covers_blocks(
1909 int nmap,
1910 xfs_bmbt_irec_t *mapp,
1911 xfs_dablk_t bno,
1912 int count)
1913 {
1914 int i;
1915 xfs_fileoff_t off;
1917 for (i = 0, off = bno; i < nmap; i++) {
1918 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
1919 mapp[i].br_startblock == DELAYSTARTBLOCK) {
1920 return 0;
1921 }
1922 if (off != mapp[i].br_startoff) {
1923 return 0;
1924 }
1925 off += mapp[i].br_blockcount;
1926 }
1927 return off == bno + count;
1928 }
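/*
 * A quick worked example of the check above (values are illustrative):
 * for bno = 0, count = 4, the mapping
 *	{ br_startoff 0, br_blockcount 2 }, { br_startoff 2, br_blockcount 2 }
 * is accepted (contiguous and ends at bno + count), while a map containing
 * HOLESTARTBLOCK/DELAYSTARTBLOCK, a gap in br_startoff, or one that stops
 * short of bno + count is rejected.
 */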
1930 /*
1931 * Make a dabuf.
1932 * Used for get_buf, read_buf, read_bufr, and reada_buf.
1933 */
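/*
 * The "caller" argument selects the behaviour; judging from the wrappers
 * further down: 0 = xfs_da_get_buf (get only), 1 = xfs_da_read_buf (read and
 * verify the magic number), 2 = read without the magic-number check
 * (presumably the read_bufr case mentioned above), 3 = xfs_da_reada_buf
 * (issue readahead only, no buffer is returned).
 */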
1934 STATIC int
1935 xfs_da_do_buf(
1936 xfs_trans_t *trans,
1937 xfs_inode_t *dp,
1938 xfs_dablk_t bno,
1939 xfs_daddr_t *mappedbnop,
1940 xfs_dabuf_t **bpp,
1941 int whichfork,
1942 int caller)
1943 {
1944 xfs_buf_t *bp = NULL;
1945 xfs_buf_t **bplist;
1946 int error=0;
1947 int i;
1948 xfs_bmbt_irec_t map;
1949 xfs_bmbt_irec_t *mapp;
1950 xfs_daddr_t mappedbno;
1951 xfs_mount_t *mp;
1952 int nbplist=0;
1953 int nfsb;
1954 int nmap;
1955 xfs_dabuf_t *rbp;
1957 mp = dp->i_mount;
1958 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1959 mappedbno = *mappedbnop;
1960 /*
1961 * Caller doesn't have a mapping. -2 means don't complain
1962 * if we land in a hole.
1963 */
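/*
 * In other words (following the uses below): mappedbno >= 0 means the caller
 * already knows the disk address; -1 means map it here and treat a hole as
 * corruption; -2 means map it here but a hole is not an error, the caller
 * simply gets back a zero return and a NULL buffer.
 */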
1964 if (mappedbno == -1 || mappedbno == -2) {
1965 /*
1966 * Optimize the one-block case.
1967 */
1968 if (nfsb == 1) {
1969 xfs_fsblock_t fsb;
1971 if ((error =
1972 xfs_bmapi_single(trans, dp, whichfork, &fsb,
1973 (xfs_fileoff_t)bno))) {
1974 return error;
1975 }
1976 mapp = &map;
1977 if (fsb == NULLFSBLOCK) {
1978 nmap = 0;
1979 } else {
1980 map.br_startblock = fsb;
1981 map.br_startoff = (xfs_fileoff_t)bno;
1982 map.br_blockcount = 1;
1983 nmap = 1;
1984 }
1985 } else {
1986 mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
1987 nmap = nfsb;
1988 if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
1989 nfsb,
1990 XFS_BMAPI_METADATA |
1991 xfs_bmapi_aflag(whichfork),
1992 NULL, 0, mapp, &nmap, NULL)))
1993 goto exit0;
1994 }
1995 } else {
1996 map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
1997 map.br_startoff = (xfs_fileoff_t)bno;
1998 map.br_blockcount = nfsb;
1999 mapp = &map;
2000 nmap = 1;
2001 }
2002 if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
2003 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
2004 if (unlikely(error == EFSCORRUPTED)) {
2005 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2006 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2007 __func__, (long long)bno,
2008 (long long)dp->i_ino);
2009 for (i = 0; i < nmap; i++) {
2010 xfs_alert(mp,
2011 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2012 i,
2013 (long long)mapp[i].br_startoff,
2014 (long long)mapp[i].br_startblock,
2015 (long long)mapp[i].br_blockcount,
2016 mapp[i].br_state);
2017 }
2018 }
2019 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2020 XFS_ERRLEVEL_LOW, mp);
2021 }
2022 goto exit0;
2023 }
2024 if (caller != 3 && nmap > 1) {
2025 bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
2026 nbplist = 0;
2027 } else
2028 bplist = NULL;
2029 /*
2030 * Turn the mapping(s) into buffer(s).
2031 */
2032 for (i = 0; i < nmap; i++) {
2033 int nmapped;
2035 mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
2036 if (i == 0)
2037 *mappedbnop = mappedbno;
2038 nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
2039 switch (caller) {
2040 case 0:
2041 bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
2042 mappedbno, nmapped, 0);
2043 error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO);
2044 break;
2045 case 1:
2046 case 2:
2047 bp = NULL;
2048 error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
2049 mappedbno, nmapped, 0, &bp);
2050 break;
2051 case 3:
2052 xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
2053 error = 0;
2054 bp = NULL;
2055 break;
2056 }
2057 if (error) {
2058 if (bp)
2059 xfs_trans_brelse(trans, bp);
2060 goto exit1;
2061 }
2062 if (!bp)
2063 continue;
2064 if (caller == 1) {
2065 if (whichfork == XFS_ATTR_FORK) {
2066 XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
2067 XFS_ATTR_BTREE_REF);
2068 } else {
2069 XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
2070 XFS_DIR_BTREE_REF);
2071 }
2072 }
2073 if (bplist) {
2074 bplist[nbplist++] = bp;
2075 }
2076 }
2077 /*
2078 * Build a dabuf structure.
2079 */
2080 if (bplist) {
2081 rbp = xfs_da_buf_make(nbplist, bplist);
2082 } else if (bp)
2083 rbp = xfs_da_buf_make(1, &bp);
2084 else
2085 rbp = NULL;
2086 /*
2087 * For read_buf, check the magic number.
2088 */
2089 if (caller == 1) {
2090 xfs_dir2_data_hdr_t *hdr = rbp->data;
2091 xfs_dir2_free_t *free = rbp->data;
2092 xfs_da_blkinfo_t *info = rbp->data;
2093 uint magic, magic1;
2095 magic = be16_to_cpu(info->magic);
2096 magic1 = be32_to_cpu(hdr->magic);
2097 if (unlikely(
2098 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2099 (magic != XFS_ATTR_LEAF_MAGIC) &&
2100 (magic != XFS_DIR2_LEAF1_MAGIC) &&
2101 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2102 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2103 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2104 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2105 mp, XFS_ERRTAG_DA_READ_BUF,
2106 XFS_RANDOM_DA_READ_BUF))) {
2107 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2108 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2109 XFS_ERRLEVEL_LOW, mp, info);
2110 error = XFS_ERROR(EFSCORRUPTED);
2111 xfs_da_brelse(trans, rbp);
2112 nbplist = 0;
2113 goto exit1;
2114 }
2115 }
2116 if (bplist) {
2117 kmem_free(bplist);
2118 }
2119 if (mapp != &map) {
2120 kmem_free(mapp);
2121 }
2122 if (bpp)
2123 *bpp = rbp;
2124 return 0;
2125 exit1:
2126 if (bplist) {
2127 for (i = 0; i < nbplist; i++)
2128 xfs_trans_brelse(trans, bplist[i]);
2129 kmem_free(bplist);
2130 }
2131 exit0:
2132 if (mapp != &map)
2133 kmem_free(mapp);
2134 if (bpp)
2135 *bpp = NULL;
2136 return error;
2137 }
2139 /*
2140 * Get a buffer for the dir/attr block.
2141 */
2142 int
2143 xfs_da_get_buf(
2144 xfs_trans_t *trans,
2145 xfs_inode_t *dp,
2146 xfs_dablk_t bno,
2147 xfs_daddr_t mappedbno,
2148 xfs_dabuf_t **bpp,
2149 int whichfork)
2150 {
2151 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
2152 }
2154 /*
2155 * Get a buffer for the dir/attr block, fill in the contents.
2156 */
2157 int
2158 xfs_da_read_buf(
2159 xfs_trans_t *trans,
2160 xfs_inode_t *dp,
2161 xfs_dablk_t bno,
2162 xfs_daddr_t mappedbno,
2163 xfs_dabuf_t **bpp,
2164 int whichfork)
2165 {
2166 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
2167 }
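/*
 * A minimal usage sketch (modelled on callers elsewhere in the dir/attr
 * code, not taken from this file; "args", "blkno" and "bp" are the caller's
 * names):
 *
 *	xfs_dabuf_t	*bp;
 *
 *	error = xfs_da_read_buf(args->trans, args->dp, blkno, -1, &bp,
 *				XFS_ATTR_FORK);
 *	if (error)
 *		return error;
 *	... examine bp->data ...
 *	xfs_da_brelse(args->trans, bp);
 */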
2169 /*
2170 * Readahead the dir/attr block.
2171 */
2172 xfs_daddr_t
2173 xfs_da_reada_buf(
2174 xfs_trans_t *trans,
2175 xfs_inode_t *dp,
2176 xfs_dablk_t bno,
2177 int whichfork)
2178 {
2179 xfs_daddr_t rval;
2181 rval = -1;
2182 if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
2183 return -1;
2184 else
2185 return rval;
2186 }
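/*
 * Note on the return value above: the readahead path hands back the daddr it
 * resolved (or -1 on any failure), so a caller that is about to read the same
 * block can pass that daddr straight back in as "mappedbno" and skip a second
 * bmap lookup.
 */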
2188 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2189 kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2191 /*
2192 * Allocate a dir-state structure.
2193 * We don't put them on the stack since they're large.
2194 */
2195 xfs_da_state_t *
2196 xfs_da_state_alloc(void)
2197 {
2198 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2199 }
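/*
 * Typical lifecycle, sketched from callers such as the attr node code (the
 * exact fields filled in vary by caller; "args" is the caller's argument
 * structure):
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	state->blocksize = state->mp->m_sb.sb_blocksize;
 *	...
 *	xfs_da_state_free(state);
 */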
2201 /*
2202 * Kill the altpath contents of a da-state structure.
2203 */
2204 STATIC void
2205 xfs_da_state_kill_altpath(xfs_da_state_t *state)
2206 {
2207 int i;
2209 for (i = 0; i < state->altpath.active; i++) {
2210 if (state->altpath.blk[i].bp) {
2211 if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2212 xfs_da_buf_done(state->altpath.blk[i].bp);
2213 state->altpath.blk[i].bp = NULL;
2214 }
2215 }
2216 state->altpath.active = 0;
2217 }
2219 /*
2220 * Free a da-state structure.
2221 */
2222 void
2223 xfs_da_state_free(xfs_da_state_t *state)
2224 {
2225 int i;
2227 xfs_da_state_kill_altpath(state);
2228 for (i = 0; i < state->path.active; i++) {
2229 if (state->path.blk[i].bp)
2230 xfs_da_buf_done(state->path.blk[i].bp);
2231 }
2232 if (state->extravalid && state->extrablk.bp)
2233 xfs_da_buf_done(state->extrablk.bp);
2234 #ifdef DEBUG
2235 memset((char *)state, 0, sizeof(*state));
2236 #endif /* DEBUG */
2237 kmem_zone_free(xfs_da_state_zone, state);
2238 }
2240 /*
2241 * Create a dabuf.
2242 */
2243 /* ARGSUSED */
2244 STATIC xfs_dabuf_t *
2245 xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
2246 {
2247 xfs_buf_t *bp;
2248 xfs_dabuf_t *dabuf;
2249 int i;
2250 int off;
2252 if (nbuf == 1)
2253 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2254 else
2255 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2256 dabuf->dirty = 0;
2257 if (nbuf == 1) {
2258 dabuf->nbuf = 1;
2259 bp = bps[0];
2260 dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
2261 dabuf->data = XFS_BUF_PTR(bp);
2262 dabuf->bps[0] = bp;
2263 } else {
2264 dabuf->nbuf = nbuf;
2265 for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2266 dabuf->bps[i] = bp = bps[i];
2267 dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
2268 }
2269 dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2270 for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2271 bp = bps[i];
2272 memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp),
2273 XFS_BUF_COUNT(bp));
2274 }
2275 }
2276 return dabuf;
2277 }
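/*
 * In the multi-buffer case above the dabuf carries its own flat copy of the
 * underlying buffers in dabuf->data; xfs_da_log_buf() below marks it dirty
 * and xfs_da_buf_clean() copies that data back out to the real buffers
 * before the dabuf is released.
 */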
2279 /*
2280 * Un-dirty a dabuf.
2281 */
2282 STATIC void
2283 xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2284 {
2285 xfs_buf_t *bp;
2286 int i;
2287 int off;
2289 if (dabuf->dirty) {
2290 ASSERT(dabuf->nbuf > 1);
2291 dabuf->dirty = 0;
2292 for (i = off = 0; i < dabuf->nbuf;
2293 i++, off += XFS_BUF_COUNT(bp)) {
2294 bp = dabuf->bps[i];
2295 memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off,
2296 XFS_BUF_COUNT(bp));
2297 }
2298 }
2299 }
2301 /*
2302 * Release a dabuf.
2303 */
2304 void
2305 xfs_da_buf_done(xfs_dabuf_t *dabuf)
2306 {
2307 ASSERT(dabuf);
2308 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2309 if (dabuf->dirty)
2310 xfs_da_buf_clean(dabuf);
2311 if (dabuf->nbuf > 1) {
2312 kmem_free(dabuf->data);
2313 kmem_free(dabuf);
2314 } else {
2315 kmem_zone_free(xfs_dabuf_zone, dabuf);
2316 }
2317 }
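/*
 * Note that xfs_da_buf_done() only tears down the dabuf wrapper; the
 * underlying xfs_buf_t buffers stay attached to the transaction.  That is
 * why xfs_da_brelse() and xfs_da_binval() below copy the bp list aside
 * before calling it, and only then release or invalidate the real buffers.
 */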
2319 /*
2320 * Log transaction from a dabuf.
2321 */
2322 void
2323 xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2324 {
2325 xfs_buf_t *bp;
2326 uint f;
2327 int i;
2328 uint l;
2329 int off;
2331 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2332 if (dabuf->nbuf == 1) {
2333 ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0]));
2334 xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2335 return;
2336 }
2337 dabuf->dirty = 1;
2338 ASSERT(first <= last);
2339 for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2340 bp = dabuf->bps[i];
2341 f = off;
2342 l = f + XFS_BUF_COUNT(bp) - 1;
2343 if (f < first)
2344 f = first;
2345 if (l > last)
2346 l = last;
2347 if (f <= l)
2348 xfs_trans_log_buf(tp, bp, f - off, l - off);
2349 /*
2350 * B_DONE is set by xfs_trans_log_buf.
2351 * If we don't set it on a new buffer (get not read)
2352 * then if we don't put anything in the buffer it won't
2353 * be set, and at commit it is released into the cache,
2354 * and then a read will fail.
2355 */
2356 else if (!(XFS_BUF_ISDONE(bp)))
2357 XFS_BUF_DONE(bp);
2358 }
2359 ASSERT(last < off);
2360 }
2362 /*
2363 * Release dabuf from a transaction.
2364 * Have to free up the dabuf before the buffers are released,
2365 * since the synchronization on the dabuf is really the lock on the buffer.
2366 */
2367 void
2368 xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2369 {
2370 xfs_buf_t *bp;
2371 xfs_buf_t **bplist;
2372 int i;
2373 int nbuf;
2375 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2376 if ((nbuf = dabuf->nbuf) == 1) {
2377 bplist = &bp;
2378 bp = dabuf->bps[0];
2379 } else {
2380 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2381 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2382 }
2383 xfs_da_buf_done(dabuf);
2384 for (i = 0; i < nbuf; i++)
2385 xfs_trans_brelse(tp, bplist[i]);
2386 if (bplist != &bp)
2387 kmem_free(bplist);
2388 }
2390 /*
2391 * Invalidate dabuf from a transaction.
2392 */
2393 void
2394 xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2395 {
2396 xfs_buf_t *bp;
2397 xfs_buf_t **bplist;
2398 int i;
2399 int nbuf;
2401 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2402 if ((nbuf = dabuf->nbuf) == 1) {
2403 bplist = &bp;
2404 bp = dabuf->bps[0];
2405 } else {
2406 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2407 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2408 }
2409 xfs_da_buf_done(dabuf);
2410 for (i = 0; i < nbuf; i++)
2411 xfs_trans_binval(tp, bplist[i]);
2412 if (bplist != &bp)
2413 kmem_free(bplist);
2414 }
2416 /*
2417 * Get the first daddr from a dabuf.
2418 */
2419 xfs_daddr_t
2420 xfs_da_blkno(xfs_dabuf_t *dabuf)
2421 {
2422 ASSERT(dabuf->nbuf);
2423 ASSERT(dabuf->data);
2424 return XFS_BUF_ADDR(dabuf->bps[0]);
2425 }