/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *node_blk_1,
					 xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
					      xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);

STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				    xfs_da_state_blk_t *drop_blk,
				    xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
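
/*
 * KM_NOFS keeps this allocation from recursing back into the filesystem
 * through memory reclaim; state structures are allocated while a directory
 * or attribute transaction may already be in progress.
 */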
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}
static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}
/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
		case XFS_DA3_NODE_MAGIC:
			if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
				xfs_buf_ioerror(bp, -EFSBADCRC);
				break;
			}
			/* fall through */
		case XFS_DA_NODE_MAGIC:
			if (!xfs_da3_node_verify(bp)) {
				xfs_buf_ioerror(bp, -EFSCORRUPTED);
				break;
			}
			return;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			bp->b_ops = &xfs_attr3_leaf_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			bp->b_ops = &xfs_dir3_leafn_buf_ops;
			bp->b_ops->verify_read(bp);
			return;
		default:
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};
int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}
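
/*
 * Tagging the buffer with an XFS_BLFT_* type here is what tells log
 * recovery what kind of metadata the buffer holds; the read itself has
 * already attached xfs_da3_node_buf_ops (or the leaf ops swapped in by
 * the read verifier above).
 */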
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return error;	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int				/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int					/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
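
/*
 * Worked example of the index adjustment above: if blk1 held 10 entries
 * and blk2 was empty, 5 entries move across, so nodehdr1.count becomes 5.
 * An insertion that was aimed at index 8 of the old block now lands at
 * index 8 - 5 = 3 in blk2, and blk1->index is set to 6 (one past the new
 * count) so it cannot be mistaken for a valid insertion point.
 */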
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
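
/*
 * When oldblk->index equals the entry count, the new entry is simply
 * appended: the memmove above is skipped (tmp stays 0), so only the new
 * slot plus the header need to be logged.
 */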
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
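
/*
 * In non-DEBUG builds the macro above expands to nothing, so the only-child
 * sanity checks compile away entirely and the call site in
 * xfs_da3_root_join() costs nothing.
 */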
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return error;
		if (retval)
			*action = 0;
		else
			*action = 2;
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;
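
	/*
	 * At this point "count" is the largest entry count a sibling may
	 * have if the merged block is to keep at least 25% of the node
	 * free. For example, with node_ents = 64 and 20 entries in this
	 * block: count = 64 - 16 - 20 = 28, so only a sibling holding 28
	 * or fewer entries is a merge candidate.
	 */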
	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;
		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash=0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}
/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
/*========================================================================
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode *dp,
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return error;
		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
/*========================================================================
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
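
/*
 * Worked example of the hash above: for a 4-byte name "data" the loop
 * executes once, giving
 *	hash = ('d' << 21) ^ ('a' << 14) ^ ('t' << 7) ^ 'a'
 * and a fifth byte would first rotate that value left by 7 bits before
 * being XORed in via the trailing switch.
 */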
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed, since removing it
 * cannot cause a bmap btree split.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
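
/*
 * Worked example (illustrative, hypothetical block numbers, not from the
 * original source): if da block 7 is being freed but block 42 is the last
 * block of the btree space, the function copies 42 over 7, repoints 42's
 * sibling forw/back pointers and the parent entry that referenced 42 at
 * block 7, and hands block 42 back to the caller:
 *
 *	error = xfs_da3_swap_lastblock(args, &dead_blkno, &dead_buf);
 *	if (!error) {
 *		dead_blkno is now 42 and dead_buf is its buffer, so the
 *		caller can retry xfs_bunmapi() against the end of the fork.
 *	}
 */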
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t	*dp;
	int		done, error, w, count;
	xfs_trans_t	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				    0, args->firstblock, args->flist, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
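
/*
 * Illustrative sketch, not part of the original source: conversion paths call
 * this once a block's contents have been migrated elsewhere, e.g. collapsing
 * a single remaining attribute leaf back to shortform is roughly
 *
 *	error = xfs_da_shrink_inode(args, 0, bp);
 *
 * where da block 0 is the leaf being discarded.  On the directory ENOSPC
 * path the loop above transparently substitutes the last block via
 * xfs_da3_swap_lastblock() and retries the unmap.
 */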
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
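
/*
 * Worked example (illustrative, hypothetical block numbers): with fsbcount
 * == 2, a btree block at da block 16 is covered by
 *
 *	struct xfs_bmbt_irec map[2] = {
 *		{ .br_startoff = 16, .br_startblock = 1000, .br_blockcount = 1 },
 *		{ .br_startoff = 17, .br_startblock = 2048, .br_blockcount = 1 },
 *	};
 *
 * so xfs_da_map_covers_blocks(2, map, 16, 2) returns 1.  If the second
 * extent started at offset 18, or either extent were a hole or delalloc
 * extent, it would return 0.
 */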
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}

	return 0;
}
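
/*
 * Illustrative sketch, not part of the original source: on a 4k-block
 * filesystem a single irec { .br_startblock = 100, .br_blockcount = 2 }
 * converts in place into the caller's map as
 *
 *	map->bm_bn  = XFS_FSB_TO_DADDR(mp, 100);
 *	map->bm_len = XFS_FSB_TO_BB(mp, 2);	(16 basic blocks)
 *
 * whereas two or more irecs cause a new array to be allocated and handed
 * back through *mapp, which the caller must kmem_free() when finished.
 */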
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	<0 - negative errno (e.g. -EFSCORRUPTED) if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
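
/*
 * Illustrative sketch, not part of the original source, of the mappedbno
 * convention as seen by a caller doing a speculative lookup:
 *
 *	mapp = &map;
 *	nmap = 1;
 *	error = xfs_dabuf_map(dp, bno, -2, XFS_DATA_FORK, &mapp, &nmap);
 *	if (error == -1)
 *		return 0;	(landed in a hole; quietly skip the read)
 *	if (error)
 *		return error;	(real failure, e.g. -EFSCORRUPTED)
 *	(mapp/nmap now describe the daddr ranges to read)
 *
 * Passing a real disk address as mappedbno bypasses the bmap lookup entirely
 * and simply trusts the caller's mapping.
 */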
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
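
/*
 * Illustrative sketch, not part of the original source: callers initialising
 * a freshly allocated block pass mappedbno == -1, since no mapping is cached
 * and no read is wanted, cf. xfs_da3_node_create():
 *
 *	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
 *	if (error)
 *		return error;
 *	(format bp->b_addr, then log it with xfs_trans_log_buf(tp, bp, ...))
 */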
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
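
/*
 * Illustrative sketch, not part of the original source: readers pass the
 * verifier matching the block type they expect, e.g. the node read wrapper
 * in this file amounts to
 *
 *	error = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp, whichfork,
 *				&xfs_da3_node_buf_ops);
 *
 * so CRC and structure checks run before the contents are trusted.
 */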
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
			      &mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return mappedbno;
}
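
/*
 * Illustrative sketch, not part of the original source: readahead callers
 * can pass mappedbno == -2 so that holes are skipped quietly rather than
 * reported as corruption, e.g. (assuming a directory data readahead verifier
 * along the lines of xfs_dir3_data_reada_buf_ops):
 *
 *	xfs_da_reada_buf(dp, next_bno, -2, XFS_DATA_FORK,
 *			 &xfs_dir3_data_reada_buf_ops);
 *
 * Readahead is purely advisory; if the block cannot be mapped, no I/O is
 * issued.
 */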