/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
					   xfs_da_state_blk_t *existing_root,
					   xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
					   xfs_da_state_blk_t *existing_blk,
					   xfs_da_state_blk_t *split_blk,
					   xfs_da_state_blk_t *blk_to_add,
					   int treelevel,
					   int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
					   xfs_da_state_blk_t *node_blk_1,
					   xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
					   xfs_da_state_blk_t *old_node_blk,
					   xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
					  xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
					    xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
					   xfs_da_state_blk_t *src_node_blk,
					   xfs_da_state_blk_t *dst_node_blk);

STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				   xfs_da_state_blk_t *drop_blk,
				   xfs_da_state_blk_t *save_blk);

kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

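/*
 * Note on the checks above: on a v5 (CRC-enabled) filesystem the v3 node
 * header is self-describing, so beyond the level/count sanity checks the
 * verifier can also match the UUID, block number and LSN against the mount.
 * On older filesystems only the magic number and the structural bounds are
 * available to check.
 */
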
static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

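/*
 * In the write verifier above the order matters: the structural checks run
 * first, then the LSN is copied in from the buffer log item (if the buffer
 * was logged), and only then is the CRC recomputed so that it covers the
 * final on-disk contents.
 */
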
/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_buf_ioerror(bp, -EFSBADCRC);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
		}
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};

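/*
 * These ops are attached to each da btree buffer that is read or created,
 * so both the read and write paths run the structural verifier and, on v5
 * filesystems, the CRC check. When the read verifier finds a leaf block
 * where a node was expected, it re-points b_ops at the matching leaf
 * verifier and re-runs verification in that format.
 */
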
int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp && *bpp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

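/*
 * The buffer is tagged with the block type that was actually found (node,
 * attr leaf or dir leafn) so that log recovery can pick the right verifier
 * when the buffer is replayed from the log.
 */
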
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}

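/*
 * A freshly created node differs between the two disk formats only in its
 * header: the v3 header additionally records the owning inode, the block
 * number and the filesystem UUID so the block is self-describing. The array
 * of (hashval, before) entries that follows is the same in both formats.
 */
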
348 * Split a leaf node, rebalance, then possibly split
349 * intermediate nodes, rebalance, etc.
353 struct xfs_da_state
*state
)
355 struct xfs_da_state_blk
*oldblk
;
356 struct xfs_da_state_blk
*newblk
;
357 struct xfs_da_state_blk
*addblk
;
358 struct xfs_da_intnode
*node
;
364 trace_xfs_da_split(state
->args
);
367 * Walk back up the tree splitting/inserting/adjusting as necessary.
368 * If we need to insert and there isn't room, split the node, then
369 * decide which fragment to insert the new block from below into.
370 * Note that we may split the root this way, but we need more fixup.
372 max
= state
->path
.active
- 1;
373 ASSERT((max
>= 0) && (max
< XFS_DA_NODE_MAXDEPTH
));
374 ASSERT(state
->path
.blk
[max
].magic
== XFS_ATTR_LEAF_MAGIC
||
375 state
->path
.blk
[max
].magic
== XFS_DIR2_LEAFN_MAGIC
);
377 addblk
= &state
->path
.blk
[max
]; /* initial dummy value */
378 for (i
= max
; (i
>= 0) && addblk
; state
->path
.active
--, i
--) {
379 oldblk
= &state
->path
.blk
[i
];
380 newblk
= &state
->altpath
.blk
[i
];
383 * If a leaf node then
384 * Allocate a new leaf node, then rebalance across them.
385 * else if an intermediate node then
386 * We split on the last layer, must we split the node?
388 switch (oldblk
->magic
) {
389 case XFS_ATTR_LEAF_MAGIC
:
390 error
= xfs_attr3_leaf_split(state
, oldblk
, newblk
);
391 if ((error
!= 0) && (error
!= -ENOSPC
)) {
392 return error
; /* GROT: attr is inconsistent */
399 * Entry wouldn't fit, split the leaf again. The new
400 * extrablk will be consumed by xfs_da3_node_split if
403 state
->extravalid
= 1;
405 state
->extraafter
= 0; /* before newblk */
406 trace_xfs_attr_leaf_split_before(state
->args
);
407 error
= xfs_attr3_leaf_split(state
, oldblk
,
410 state
->extraafter
= 1; /* after newblk */
411 trace_xfs_attr_leaf_split_after(state
->args
);
412 error
= xfs_attr3_leaf_split(state
, newblk
,
416 return error
; /* GROT: attr inconsistent */
419 case XFS_DIR2_LEAFN_MAGIC
:
420 error
= xfs_dir2_leafn_split(state
, oldblk
, newblk
);
425 case XFS_DA_NODE_MAGIC
:
426 error
= xfs_da3_node_split(state
, oldblk
, newblk
, addblk
,
430 return error
; /* GROT: dir is inconsistent */
432 * Record the newly split block for the next time thru?
442 * Update the btree to show the new hashval for this child.
444 xfs_da3_fixhashpath(state
, &state
->path
);
450 * xfs_da3_node_split() should have consumed any extra blocks we added
451 * during a double leaf split in the attr fork. This is guaranteed as
452 * we can't be here if the attr fork only has a single leaf block.
454 ASSERT(state
->extravalid
== 0 ||
455 state
->path
.blk
[max
].magic
== XFS_DIR2_LEAFN_MAGIC
);
458 * Split the root node.
460 ASSERT(state
->path
.active
== 0);
461 oldblk
= &state
->path
.blk
[0];
462 error
= xfs_da3_root_split(state
, oldblk
, addblk
);
465 return error
; /* GROT: dir is inconsistent */
469 * Update pointers to the node which used to be block 0 and just got
470 * bumped because of the addition of a new root node. Note that the
471 * original block 0 could be at any position in the list of blocks in
474 * Note: the magic numbers and sibling pointers are in the same physical
475 * place for both v2 and v3 headers (by design). Hence it doesn't matter
476 * which version of the xfs_da_intnode structure we use here as the
477 * result will be the same using either structure.
479 node
= oldblk
->bp
->b_addr
;
480 if (node
->hdr
.info
.forw
) {
481 ASSERT(be32_to_cpu(node
->hdr
.info
.forw
) == addblk
->blkno
);
482 node
= addblk
->bp
->b_addr
;
483 node
->hdr
.info
.back
= cpu_to_be32(oldblk
->blkno
);
484 xfs_trans_log_buf(state
->args
->trans
, addblk
->bp
,
485 XFS_DA_LOGRANGE(node
, &node
->hdr
.info
,
486 sizeof(node
->hdr
.info
)));
488 node
= oldblk
->bp
->b_addr
;
489 if (node
->hdr
.info
.back
) {
490 ASSERT(be32_to_cpu(node
->hdr
.info
.back
) == addblk
->blkno
);
491 node
= addblk
->bp
->b_addr
;
492 node
->hdr
.info
.forw
= cpu_to_be32(oldblk
->blkno
);
493 xfs_trans_log_buf(state
->args
->trans
, addblk
->bp
,
494 XFS_DA_LOGRANGE(node
, &node
->hdr
.info
,
495 sizeof(node
->hdr
.info
)));
502 * Split the root. We have to create a new root and point to the two
503 * parts (the split old root) that we just created. Copy block zero to
504 * the EOF, extending the inode in process.
506 STATIC
int /* error */
508 struct xfs_da_state
*state
,
509 struct xfs_da_state_blk
*blk1
,
510 struct xfs_da_state_blk
*blk2
)
512 struct xfs_da_intnode
*node
;
513 struct xfs_da_intnode
*oldroot
;
514 struct xfs_da_node_entry
*btree
;
515 struct xfs_da3_icnode_hdr nodehdr
;
516 struct xfs_da_args
*args
;
518 struct xfs_inode
*dp
;
519 struct xfs_trans
*tp
;
520 struct xfs_dir2_leaf
*leaf
;
526 trace_xfs_da_root_split(state
->args
);
529 * Copy the existing (incorrect) block from the root node position
530 * to a free space somewhere.
533 error
= xfs_da_grow_inode(args
, &blkno
);
539 error
= xfs_da_get_buf(tp
, dp
, blkno
, -1, &bp
, args
->whichfork
);
543 oldroot
= blk1
->bp
->b_addr
;
544 if (oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DA_NODE_MAGIC
) ||
545 oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DA3_NODE_MAGIC
)) {
546 struct xfs_da3_icnode_hdr icnodehdr
;
548 dp
->d_ops
->node_hdr_from_disk(&icnodehdr
, oldroot
);
549 btree
= dp
->d_ops
->node_tree_p(oldroot
);
550 size
= (int)((char *)&btree
[icnodehdr
.count
] - (char *)oldroot
);
551 level
= icnodehdr
.level
;
554 * we are about to copy oldroot to bp, so set up the type
555 * of bp while we know exactly what it will be.
557 xfs_trans_buf_set_type(tp
, bp
, XFS_BLFT_DA_NODE_BUF
);
559 struct xfs_dir3_icleaf_hdr leafhdr
;
560 struct xfs_dir2_leaf_entry
*ents
;
562 leaf
= (xfs_dir2_leaf_t
*)oldroot
;
563 dp
->d_ops
->leaf_hdr_from_disk(&leafhdr
, leaf
);
564 ents
= dp
->d_ops
->leaf_ents_p(leaf
);
566 ASSERT(leafhdr
.magic
== XFS_DIR2_LEAFN_MAGIC
||
567 leafhdr
.magic
== XFS_DIR3_LEAFN_MAGIC
);
568 size
= (int)((char *)&ents
[leafhdr
.count
] - (char *)leaf
);
572 * we are about to copy oldroot to bp, so set up the type
573 * of bp while we know exactly what it will be.
575 xfs_trans_buf_set_type(tp
, bp
, XFS_BLFT_DIR_LEAFN_BUF
);
579 * we can copy most of the information in the node from one block to
580 * another, but for CRC enabled headers we have to make sure that the
581 * block specific identifiers are kept intact. We update the buffer
584 memcpy(node
, oldroot
, size
);
585 if (oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DA3_NODE_MAGIC
) ||
586 oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
)) {
587 struct xfs_da3_intnode
*node3
= (struct xfs_da3_intnode
*)node
;
589 node3
->hdr
.info
.blkno
= cpu_to_be64(bp
->b_bn
);
591 xfs_trans_log_buf(tp
, bp
, 0, size
- 1);
593 bp
->b_ops
= blk1
->bp
->b_ops
;
594 xfs_trans_buf_copy_type(bp
, blk1
->bp
);
599 * Set up the new root node.
601 error
= xfs_da3_node_create(args
,
602 (args
->whichfork
== XFS_DATA_FORK
) ? args
->geo
->leafblk
: 0,
603 level
+ 1, &bp
, args
->whichfork
);
608 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
609 btree
= dp
->d_ops
->node_tree_p(node
);
610 btree
[0].hashval
= cpu_to_be32(blk1
->hashval
);
611 btree
[0].before
= cpu_to_be32(blk1
->blkno
);
612 btree
[1].hashval
= cpu_to_be32(blk2
->hashval
);
613 btree
[1].before
= cpu_to_be32(blk2
->blkno
);
615 dp
->d_ops
->node_hdr_to_disk(node
, &nodehdr
);
618 if (oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DIR2_LEAFN_MAGIC
) ||
619 oldroot
->hdr
.info
.magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
)) {
620 ASSERT(blk1
->blkno
>= args
->geo
->leafblk
&&
621 blk1
->blkno
< args
->geo
->freeblk
);
622 ASSERT(blk2
->blkno
>= args
->geo
->leafblk
&&
623 blk2
->blkno
< args
->geo
->freeblk
);
627 /* Header is already logged by xfs_da_node_create */
628 xfs_trans_log_buf(tp
, bp
,
629 XFS_DA_LOGRANGE(node
, btree
, sizeof(xfs_da_node_entry_t
) * 2));
635 * Split the node, rebalance, then add the new entry.
637 STATIC
int /* error */
639 struct xfs_da_state
*state
,
640 struct xfs_da_state_blk
*oldblk
,
641 struct xfs_da_state_blk
*newblk
,
642 struct xfs_da_state_blk
*addblk
,
646 struct xfs_da_intnode
*node
;
647 struct xfs_da3_icnode_hdr nodehdr
;
652 struct xfs_inode
*dp
= state
->args
->dp
;
654 trace_xfs_da_node_split(state
->args
);
656 node
= oldblk
->bp
->b_addr
;
657 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
660 * With V2 dirs the extra block is data or freespace.
662 useextra
= state
->extravalid
&& state
->args
->whichfork
== XFS_ATTR_FORK
;
663 newcount
= 1 + useextra
;
665 * Do we have to split the node?
667 if (nodehdr
.count
+ newcount
> state
->args
->geo
->node_ents
) {
669 * Allocate a new node, add to the doubly linked chain of
670 * nodes, then move some of our excess entries into it.
672 error
= xfs_da_grow_inode(state
->args
, &blkno
);
674 return error
; /* GROT: dir is inconsistent */
676 error
= xfs_da3_node_create(state
->args
, blkno
, treelevel
,
677 &newblk
->bp
, state
->args
->whichfork
);
679 return error
; /* GROT: dir is inconsistent */
680 newblk
->blkno
= blkno
;
681 newblk
->magic
= XFS_DA_NODE_MAGIC
;
682 xfs_da3_node_rebalance(state
, oldblk
, newblk
);
683 error
= xfs_da3_blk_link(state
, oldblk
, newblk
);
692 * Insert the new entry(s) into the correct block
693 * (updating last hashval in the process).
695 * xfs_da3_node_add() inserts BEFORE the given index,
696 * and as a result of using node_lookup_int() we always
697 * point to a valid entry (not after one), but a split
698 * operation always results in a new block whose hashvals
699 * FOLLOW the current block.
701 * If we had double-split op below us, then add the extra block too.
703 node
= oldblk
->bp
->b_addr
;
704 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
705 if (oldblk
->index
<= nodehdr
.count
) {
707 xfs_da3_node_add(state
, oldblk
, addblk
);
709 if (state
->extraafter
)
711 xfs_da3_node_add(state
, oldblk
, &state
->extrablk
);
712 state
->extravalid
= 0;
716 xfs_da3_node_add(state
, newblk
, addblk
);
718 if (state
->extraafter
)
720 xfs_da3_node_add(state
, newblk
, &state
->extrablk
);
721 state
->extravalid
= 0;
729 * Balance the btree elements between two intermediate nodes,
730 * usually one full and one empty.
732 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
735 xfs_da3_node_rebalance(
736 struct xfs_da_state
*state
,
737 struct xfs_da_state_blk
*blk1
,
738 struct xfs_da_state_blk
*blk2
)
740 struct xfs_da_intnode
*node1
;
741 struct xfs_da_intnode
*node2
;
742 struct xfs_da_intnode
*tmpnode
;
743 struct xfs_da_node_entry
*btree1
;
744 struct xfs_da_node_entry
*btree2
;
745 struct xfs_da_node_entry
*btree_s
;
746 struct xfs_da_node_entry
*btree_d
;
747 struct xfs_da3_icnode_hdr nodehdr1
;
748 struct xfs_da3_icnode_hdr nodehdr2
;
749 struct xfs_trans
*tp
;
753 struct xfs_inode
*dp
= state
->args
->dp
;
755 trace_xfs_da_node_rebalance(state
->args
);
757 node1
= blk1
->bp
->b_addr
;
758 node2
= blk2
->bp
->b_addr
;
759 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
760 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
761 btree1
= dp
->d_ops
->node_tree_p(node1
);
762 btree2
= dp
->d_ops
->node_tree_p(node2
);
765 * Figure out how many entries need to move, and in which direction.
766 * Swap the nodes around if that makes it simpler.
768 if (nodehdr1
.count
> 0 && nodehdr2
.count
> 0 &&
769 ((be32_to_cpu(btree2
[0].hashval
) < be32_to_cpu(btree1
[0].hashval
)) ||
770 (be32_to_cpu(btree2
[nodehdr2
.count
- 1].hashval
) <
771 be32_to_cpu(btree1
[nodehdr1
.count
- 1].hashval
)))) {
775 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
776 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
777 btree1
= dp
->d_ops
->node_tree_p(node1
);
778 btree2
= dp
->d_ops
->node_tree_p(node2
);
782 count
= (nodehdr1
.count
- nodehdr2
.count
) / 2;
785 tp
= state
->args
->trans
;
787 * Two cases: high-to-low and low-to-high.
791 * Move elements in node2 up to make a hole.
793 tmp
= nodehdr2
.count
;
795 tmp
*= (uint
)sizeof(xfs_da_node_entry_t
);
796 btree_s
= &btree2
[0];
797 btree_d
= &btree2
[count
];
798 memmove(btree_d
, btree_s
, tmp
);
802 * Move the req'd B-tree elements from high in node1 to
805 nodehdr2
.count
+= count
;
806 tmp
= count
* (uint
)sizeof(xfs_da_node_entry_t
);
807 btree_s
= &btree1
[nodehdr1
.count
- count
];
808 btree_d
= &btree2
[0];
809 memcpy(btree_d
, btree_s
, tmp
);
810 nodehdr1
.count
-= count
;
813 * Move the req'd B-tree elements from low in node2 to
817 tmp
= count
* (uint
)sizeof(xfs_da_node_entry_t
);
818 btree_s
= &btree2
[0];
819 btree_d
= &btree1
[nodehdr1
.count
];
820 memcpy(btree_d
, btree_s
, tmp
);
821 nodehdr1
.count
+= count
;
823 xfs_trans_log_buf(tp
, blk1
->bp
,
824 XFS_DA_LOGRANGE(node1
, btree_d
, tmp
));
827 * Move elements in node2 down to fill the hole.
829 tmp
= nodehdr2
.count
- count
;
830 tmp
*= (uint
)sizeof(xfs_da_node_entry_t
);
831 btree_s
= &btree2
[count
];
832 btree_d
= &btree2
[0];
833 memmove(btree_d
, btree_s
, tmp
);
834 nodehdr2
.count
-= count
;
838 * Log header of node 1 and all current bits of node 2.
840 dp
->d_ops
->node_hdr_to_disk(node1
, &nodehdr1
);
841 xfs_trans_log_buf(tp
, blk1
->bp
,
842 XFS_DA_LOGRANGE(node1
, &node1
->hdr
, dp
->d_ops
->node_hdr_size
));
844 dp
->d_ops
->node_hdr_to_disk(node2
, &nodehdr2
);
845 xfs_trans_log_buf(tp
, blk2
->bp
,
846 XFS_DA_LOGRANGE(node2
, &node2
->hdr
,
847 dp
->d_ops
->node_hdr_size
+
848 (sizeof(btree2
[0]) * nodehdr2
.count
)));
851 * Record the last hashval from each block for upward propagation.
852 * (note: don't use the swapped node pointers)
855 node1
= blk1
->bp
->b_addr
;
856 node2
= blk2
->bp
->b_addr
;
857 dp
->d_ops
->node_hdr_from_disk(&nodehdr1
, node1
);
858 dp
->d_ops
->node_hdr_from_disk(&nodehdr2
, node2
);
859 btree1
= dp
->d_ops
->node_tree_p(node1
);
860 btree2
= dp
->d_ops
->node_tree_p(node2
);
862 blk1
->hashval
= be32_to_cpu(btree1
[nodehdr1
.count
- 1].hashval
);
863 blk2
->hashval
= be32_to_cpu(btree2
[nodehdr2
.count
- 1].hashval
);
866 * Adjust the expected index for insertion.
868 if (blk1
->index
>= nodehdr1
.count
) {
869 blk2
->index
= blk1
->index
- nodehdr1
.count
;
870 blk1
->index
= nodehdr1
.count
+ 1; /* make it invalid */
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

926 /*========================================================================
927 * Routines used for shrinking the Btree.
928 *========================================================================*/
931 * Deallocate an empty leaf node, remove it from its parent,
932 * possibly deallocating that block, etc...
936 struct xfs_da_state
*state
)
938 struct xfs_da_state_blk
*drop_blk
;
939 struct xfs_da_state_blk
*save_blk
;
943 trace_xfs_da_join(state
->args
);
945 drop_blk
= &state
->path
.blk
[ state
->path
.active
-1 ];
946 save_blk
= &state
->altpath
.blk
[ state
->path
.active
-1 ];
947 ASSERT(state
->path
.blk
[0].magic
== XFS_DA_NODE_MAGIC
);
948 ASSERT(drop_blk
->magic
== XFS_ATTR_LEAF_MAGIC
||
949 drop_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
);
952 * Walk back up the tree joining/deallocating as necessary.
953 * When we stop dropping blocks, break out.
955 for ( ; state
->path
.active
>= 2; drop_blk
--, save_blk
--,
956 state
->path
.active
--) {
958 * See if we can combine the block with a neighbor.
959 * (action == 0) => no options, just leave
960 * (action == 1) => coalesce, then unlink
961 * (action == 2) => block empty, unlink it
963 switch (drop_blk
->magic
) {
964 case XFS_ATTR_LEAF_MAGIC
:
965 error
= xfs_attr3_leaf_toosmall(state
, &action
);
970 xfs_attr3_leaf_unbalance(state
, drop_blk
, save_blk
);
972 case XFS_DIR2_LEAFN_MAGIC
:
973 error
= xfs_dir2_leafn_toosmall(state
, &action
);
978 xfs_dir2_leafn_unbalance(state
, drop_blk
, save_blk
);
980 case XFS_DA_NODE_MAGIC
:
982 * Remove the offending node, fixup hashvals,
983 * check for a toosmall neighbor.
985 xfs_da3_node_remove(state
, drop_blk
);
986 xfs_da3_fixhashpath(state
, &state
->path
);
987 error
= xfs_da3_node_toosmall(state
, &action
);
992 xfs_da3_node_unbalance(state
, drop_blk
, save_blk
);
995 xfs_da3_fixhashpath(state
, &state
->altpath
);
996 error
= xfs_da3_blk_unlink(state
, drop_blk
, save_blk
);
997 xfs_da_state_kill_altpath(state
);
1000 error
= xfs_da_shrink_inode(state
->args
, drop_blk
->blkno
,
1002 drop_blk
->bp
= NULL
;
1007 * We joined all the way to the top. If it turns out that
1008 * we only have one entry in the root, make the child block
1011 xfs_da3_node_remove(state
, drop_blk
);
1012 xfs_da3_fixhashpath(state
, &state
->path
);
1013 error
= xfs_da3_root_join(state
, &state
->path
.blk
[0]);
1019 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo
*blkinfo
, __u16 level
)
1021 __be16 magic
= blkinfo
->magic
;
1024 ASSERT(magic
== cpu_to_be16(XFS_DIR2_LEAFN_MAGIC
) ||
1025 magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
) ||
1026 magic
== cpu_to_be16(XFS_ATTR_LEAF_MAGIC
) ||
1027 magic
== cpu_to_be16(XFS_ATTR3_LEAF_MAGIC
));
1029 ASSERT(magic
== cpu_to_be16(XFS_DA_NODE_MAGIC
) ||
1030 magic
== cpu_to_be16(XFS_DA3_NODE_MAGIC
));
1032 ASSERT(!blkinfo
->forw
);
1033 ASSERT(!blkinfo
->back
);
1036 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1040 * We have only one entry in the root. Copy the only remaining child of
1041 * the old root to block 0 as the new root node.
1045 struct xfs_da_state
*state
,
1046 struct xfs_da_state_blk
*root_blk
)
1048 struct xfs_da_intnode
*oldroot
;
1049 struct xfs_da_args
*args
;
1052 struct xfs_da3_icnode_hdr oldroothdr
;
1053 struct xfs_da_node_entry
*btree
;
1055 struct xfs_inode
*dp
= state
->args
->dp
;
1057 trace_xfs_da_root_join(state
->args
);
1059 ASSERT(root_blk
->magic
== XFS_DA_NODE_MAGIC
);
1062 oldroot
= root_blk
->bp
->b_addr
;
1063 dp
->d_ops
->node_hdr_from_disk(&oldroothdr
, oldroot
);
1064 ASSERT(oldroothdr
.forw
== 0);
1065 ASSERT(oldroothdr
.back
== 0);
1068 * If the root has more than one child, then don't do anything.
1070 if (oldroothdr
.count
> 1)
1074 * Read in the (only) child block, then copy those bytes into
1075 * the root block's buffer and free the original child block.
1077 btree
= dp
->d_ops
->node_tree_p(oldroot
);
1078 child
= be32_to_cpu(btree
[0].before
);
1080 error
= xfs_da3_node_read(args
->trans
, dp
, child
, -1, &bp
,
1084 xfs_da_blkinfo_onlychild_validate(bp
->b_addr
, oldroothdr
.level
);
1087 * This could be copying a leaf back into the root block in the case of
1088 * there only being a single leaf block left in the tree. Hence we have
1089 * to update the b_ops pointer as well to match the buffer type change
1090 * that could occur. For dir3 blocks we also need to update the block
1091 * number in the buffer header.
1093 memcpy(root_blk
->bp
->b_addr
, bp
->b_addr
, args
->geo
->blksize
);
1094 root_blk
->bp
->b_ops
= bp
->b_ops
;
1095 xfs_trans_buf_copy_type(root_blk
->bp
, bp
);
1096 if (oldroothdr
.magic
== XFS_DA3_NODE_MAGIC
) {
1097 struct xfs_da3_blkinfo
*da3
= root_blk
->bp
->b_addr
;
1098 da3
->blkno
= cpu_to_be64(root_blk
->bp
->b_bn
);
1100 xfs_trans_log_buf(args
->trans
, root_blk
->bp
, 0,
1101 args
->geo
->blksize
- 1);
1102 error
= xfs_da_shrink_inode(args
, child
, bp
);
1107 * Check a node block and its neighbors to see if the block should be
1108 * collapsed into one or the other neighbor. Always keep the block
1109 * with the smaller block number.
1110 * If the current block is over 50% full, don't try to join it, return 0.
1111 * If the block is empty, fill in the state structure and return 2.
1112 * If it can be collapsed, fill in the state structure and return 1.
1113 * If nothing can be done, return 0.
1116 xfs_da3_node_toosmall(
1117 struct xfs_da_state
*state
,
1120 struct xfs_da_intnode
*node
;
1121 struct xfs_da_state_blk
*blk
;
1122 struct xfs_da_blkinfo
*info
;
1125 struct xfs_da3_icnode_hdr nodehdr
;
1131 struct xfs_inode
*dp
= state
->args
->dp
;
1133 trace_xfs_da_node_toosmall(state
->args
);
1136 * Check for the degenerate case of the block being over 50% full.
1137 * If so, it's not worth even looking to see if we might be able
1138 * to coalesce with a sibling.
1140 blk
= &state
->path
.blk
[ state
->path
.active
-1 ];
1141 info
= blk
->bp
->b_addr
;
1142 node
= (xfs_da_intnode_t
*)info
;
1143 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1144 if (nodehdr
.count
> (state
->args
->geo
->node_ents
>> 1)) {
1145 *action
= 0; /* blk over 50%, don't try to join */
1146 return 0; /* blk over 50%, don't try to join */
1150 * Check for the degenerate case of the block being empty.
1151 * If the block is empty, we'll simply delete it, no need to
1152 * coalesce it with a sibling block. We choose (arbitrarily)
1153 * to merge with the forward block unless it is NULL.
1155 if (nodehdr
.count
== 0) {
1157 * Make altpath point to the block we want to keep and
1158 * path point to the block we want to drop (this one).
1160 forward
= (info
->forw
!= 0);
1161 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1162 error
= xfs_da3_path_shift(state
, &state
->altpath
, forward
,
1175 * Examine each sibling block to see if we can coalesce with
1176 * at least 25% free space to spare. We need to figure out
1177 * whether to merge with the forward or the backward block.
1178 * We prefer coalescing with the lower numbered sibling so as
1179 * to shrink a directory over time.
1181 count
= state
->args
->geo
->node_ents
;
1182 count
-= state
->args
->geo
->node_ents
>> 2;
1183 count
-= nodehdr
.count
;
1185 /* start with smaller blk num */
1186 forward
= nodehdr
.forw
< nodehdr
.back
;
1187 for (i
= 0; i
< 2; forward
= !forward
, i
++) {
1188 struct xfs_da3_icnode_hdr thdr
;
1190 blkno
= nodehdr
.forw
;
1192 blkno
= nodehdr
.back
;
1195 error
= xfs_da3_node_read(state
->args
->trans
, dp
,
1196 blkno
, -1, &bp
, state
->args
->whichfork
);
1201 dp
->d_ops
->node_hdr_from_disk(&thdr
, node
);
1202 xfs_trans_brelse(state
->args
->trans
, bp
);
1204 if (count
- thdr
.count
>= 0)
1205 break; /* fits with at least 25% to spare */
1213 * Make altpath point to the block we want to keep (the lower
1214 * numbered block) and path point to the block we want to drop.
1216 memcpy(&state
->altpath
, &state
->path
, sizeof(state
->path
));
1217 if (blkno
< blk
->blkno
) {
1218 error
= xfs_da3_path_shift(state
, &state
->altpath
, forward
,
1221 error
= xfs_da3_path_shift(state
, &state
->path
, forward
,
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

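/*
 * The upward walk in xfs_da3_fixhashpath() stops at the first ancestor
 * entry whose recorded hashval already matches; if that entry did not
 * change, nothing above it can have changed either, so there is nothing
 * more to log.
 */
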
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

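/*
 * After the removal the entry array has been compacted and the freed slot
 * zeroed, so the last live entry sits at index - 1; its hashval is what
 * xfs_da3_fixhashpath() will propagate up to the parent nodes.
 */
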
1363 * Unbalance the elements between two intermediate nodes,
1364 * move all Btree elements from one node into another.
1367 xfs_da3_node_unbalance(
1368 struct xfs_da_state
*state
,
1369 struct xfs_da_state_blk
*drop_blk
,
1370 struct xfs_da_state_blk
*save_blk
)
1372 struct xfs_da_intnode
*drop_node
;
1373 struct xfs_da_intnode
*save_node
;
1374 struct xfs_da_node_entry
*drop_btree
;
1375 struct xfs_da_node_entry
*save_btree
;
1376 struct xfs_da3_icnode_hdr drop_hdr
;
1377 struct xfs_da3_icnode_hdr save_hdr
;
1378 struct xfs_trans
*tp
;
1381 struct xfs_inode
*dp
= state
->args
->dp
;
1383 trace_xfs_da_node_unbalance(state
->args
);
1385 drop_node
= drop_blk
->bp
->b_addr
;
1386 save_node
= save_blk
->bp
->b_addr
;
1387 dp
->d_ops
->node_hdr_from_disk(&drop_hdr
, drop_node
);
1388 dp
->d_ops
->node_hdr_from_disk(&save_hdr
, save_node
);
1389 drop_btree
= dp
->d_ops
->node_tree_p(drop_node
);
1390 save_btree
= dp
->d_ops
->node_tree_p(save_node
);
1391 tp
= state
->args
->trans
;
1394 * If the dying block has lower hashvals, then move all the
1395 * elements in the remaining block up to make a hole.
1397 if ((be32_to_cpu(drop_btree
[0].hashval
) <
1398 be32_to_cpu(save_btree
[0].hashval
)) ||
1399 (be32_to_cpu(drop_btree
[drop_hdr
.count
- 1].hashval
) <
1400 be32_to_cpu(save_btree
[save_hdr
.count
- 1].hashval
))) {
1401 /* XXX: check this - is memmove dst correct? */
1402 tmp
= save_hdr
.count
* sizeof(xfs_da_node_entry_t
);
1403 memmove(&save_btree
[drop_hdr
.count
], &save_btree
[0], tmp
);
1406 xfs_trans_log_buf(tp
, save_blk
->bp
,
1407 XFS_DA_LOGRANGE(save_node
, &save_btree
[0],
1408 (save_hdr
.count
+ drop_hdr
.count
) *
1409 sizeof(xfs_da_node_entry_t
)));
1411 sindex
= save_hdr
.count
;
1412 xfs_trans_log_buf(tp
, save_blk
->bp
,
1413 XFS_DA_LOGRANGE(save_node
, &save_btree
[sindex
],
1414 drop_hdr
.count
* sizeof(xfs_da_node_entry_t
)));
1418 * Move all the B-tree elements from drop_blk to save_blk.
1420 tmp
= drop_hdr
.count
* (uint
)sizeof(xfs_da_node_entry_t
);
1421 memcpy(&save_btree
[sindex
], &drop_btree
[0], tmp
);
1422 save_hdr
.count
+= drop_hdr
.count
;
1424 dp
->d_ops
->node_hdr_to_disk(save_node
, &save_hdr
);
1425 xfs_trans_log_buf(tp
, save_blk
->bp
,
1426 XFS_DA_LOGRANGE(save_node
, &save_node
->hdr
,
1427 dp
->d_ops
->node_hdr_size
));
1430 * Save the last hashval in the remaining block for upward propagation.
1432 save_blk
->hashval
= be32_to_cpu(save_btree
[save_hdr
.count
- 1].hashval
);
1435 /*========================================================================
1436 * Routines used for finding things in the Btree.
1437 *========================================================================*/
1440 * Walk down the Btree looking for a particular filename, filling
1441 * in the state structure as we go.
1443 * We will set the state structure to point to each of the elements
1444 * in each of the nodes where either the hashval is or should be.
1446 * We support duplicate hashval's so for each entry in the current
1447 * node that could contain the desired hashval, descend. This is a
1448 * pruned depth-first tree search.
1451 xfs_da3_node_lookup_int(
1452 struct xfs_da_state
*state
,
1455 struct xfs_da_state_blk
*blk
;
1456 struct xfs_da_blkinfo
*curr
;
1457 struct xfs_da_intnode
*node
;
1458 struct xfs_da_node_entry
*btree
;
1459 struct xfs_da3_icnode_hdr nodehdr
;
1460 struct xfs_da_args
*args
;
1462 xfs_dahash_t hashval
;
1463 xfs_dahash_t btreehashval
;
1469 struct xfs_inode
*dp
= state
->args
->dp
;
1474 * Descend thru the B-tree searching each level for the right
1475 * node to use, until the right hashval is found.
1477 blkno
= (args
->whichfork
== XFS_DATA_FORK
)? args
->geo
->leafblk
: 0;
1478 for (blk
= &state
->path
.blk
[0], state
->path
.active
= 1;
1479 state
->path
.active
<= XFS_DA_NODE_MAXDEPTH
;
1480 blk
++, state
->path
.active
++) {
1482 * Read the next node down in the tree.
1485 error
= xfs_da3_node_read(args
->trans
, args
->dp
, blkno
,
1486 -1, &blk
->bp
, args
->whichfork
);
1489 state
->path
.active
--;
1492 curr
= blk
->bp
->b_addr
;
1493 blk
->magic
= be16_to_cpu(curr
->magic
);
1495 if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
||
1496 blk
->magic
== XFS_ATTR3_LEAF_MAGIC
) {
1497 blk
->magic
= XFS_ATTR_LEAF_MAGIC
;
1498 blk
->hashval
= xfs_attr_leaf_lasthash(blk
->bp
, NULL
);
1502 if (blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1503 blk
->magic
== XFS_DIR3_LEAFN_MAGIC
) {
1504 blk
->magic
= XFS_DIR2_LEAFN_MAGIC
;
1505 blk
->hashval
= xfs_dir2_leafn_lasthash(args
->dp
,
1510 blk
->magic
= XFS_DA_NODE_MAGIC
;
1514 * Search an intermediate node for a match.
1516 node
= blk
->bp
->b_addr
;
1517 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1518 btree
= dp
->d_ops
->node_tree_p(node
);
1520 max
= nodehdr
.count
;
1521 blk
->hashval
= be32_to_cpu(btree
[max
- 1].hashval
);
1524 * Binary search. (note: small blocks will skip loop)
1526 probe
= span
= max
/ 2;
1527 hashval
= args
->hashval
;
1530 btreehashval
= be32_to_cpu(btree
[probe
].hashval
);
1531 if (btreehashval
< hashval
)
1533 else if (btreehashval
> hashval
)
1538 ASSERT((probe
>= 0) && (probe
< max
));
1539 ASSERT((span
<= 4) ||
1540 (be32_to_cpu(btree
[probe
].hashval
) == hashval
));
1543 * Since we may have duplicate hashval's, find the first
1544 * matching hashval in the node.
1547 be32_to_cpu(btree
[probe
].hashval
) >= hashval
) {
1550 while (probe
< max
&&
1551 be32_to_cpu(btree
[probe
].hashval
) < hashval
) {
1556 * Pick the right block to descend on.
1559 blk
->index
= max
- 1;
1560 blkno
= be32_to_cpu(btree
[max
- 1].before
);
1563 blkno
= be32_to_cpu(btree
[probe
].before
);
1568 * A leaf block that ends in the hashval that we are interested in
1569 * (final hashval == search hashval) means that the next block may
1570 * contain more entries with the same hashval, shift upward to the
1571 * next leaf and keep searching.
1574 if (blk
->magic
== XFS_DIR2_LEAFN_MAGIC
) {
1575 retval
= xfs_dir2_leafn_lookup_int(blk
->bp
, args
,
1576 &blk
->index
, state
);
1577 } else if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
) {
1578 retval
= xfs_attr3_leaf_lookup_int(blk
->bp
, args
);
1579 blk
->index
= args
->index
;
1580 args
->blkno
= blk
->blkno
;
1583 return -EFSCORRUPTED
;
1585 if (((retval
== -ENOENT
) || (retval
== -ENOATTR
)) &&
1586 (blk
->hashval
== args
->hashval
)) {
1587 error
= xfs_da3_path_shift(state
, &state
->path
, 1, 1,
1593 } else if (blk
->magic
== XFS_ATTR_LEAF_MAGIC
) {
1594 /* path_shift() gives ENOENT */
1604 /*========================================================================
1606 *========================================================================*/
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode *dp,
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

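/*
 * "Order" here is hashval order: the routine returns 1 when node2's entries
 * sort before node1's (checked at both the low and high end), which tells
 * callers such as xfs_da3_blk_link() whether the new block belongs in front
 * of or behind the existing one in the sibling chain.
 */
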
1641 * Link a new block into a doubly linked list of blocks (of whatever type).
1645 struct xfs_da_state
*state
,
1646 struct xfs_da_state_blk
*old_blk
,
1647 struct xfs_da_state_blk
*new_blk
)
1649 struct xfs_da_blkinfo
*old_info
;
1650 struct xfs_da_blkinfo
*new_info
;
1651 struct xfs_da_blkinfo
*tmp_info
;
1652 struct xfs_da_args
*args
;
1656 struct xfs_inode
*dp
= state
->args
->dp
;
1659 * Set up environment.
1662 ASSERT(args
!= NULL
);
1663 old_info
= old_blk
->bp
->b_addr
;
1664 new_info
= new_blk
->bp
->b_addr
;
1665 ASSERT(old_blk
->magic
== XFS_DA_NODE_MAGIC
||
1666 old_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1667 old_blk
->magic
== XFS_ATTR_LEAF_MAGIC
);
1669 switch (old_blk
->magic
) {
1670 case XFS_ATTR_LEAF_MAGIC
:
1671 before
= xfs_attr_leaf_order(old_blk
->bp
, new_blk
->bp
);
1673 case XFS_DIR2_LEAFN_MAGIC
:
1674 before
= xfs_dir2_leafn_order(dp
, old_blk
->bp
, new_blk
->bp
);
1676 case XFS_DA_NODE_MAGIC
:
1677 before
= xfs_da3_node_order(dp
, old_blk
->bp
, new_blk
->bp
);
1682 * Link blocks in appropriate order.
1686 * Link new block in before existing block.
1688 trace_xfs_da_link_before(args
);
1689 new_info
->forw
= cpu_to_be32(old_blk
->blkno
);
1690 new_info
->back
= old_info
->back
;
1691 if (old_info
->back
) {
1692 error
= xfs_da3_node_read(args
->trans
, dp
,
1693 be32_to_cpu(old_info
->back
),
1694 -1, &bp
, args
->whichfork
);
1698 tmp_info
= bp
->b_addr
;
1699 ASSERT(tmp_info
->magic
== old_info
->magic
);
1700 ASSERT(be32_to_cpu(tmp_info
->forw
) == old_blk
->blkno
);
1701 tmp_info
->forw
= cpu_to_be32(new_blk
->blkno
);
1702 xfs_trans_log_buf(args
->trans
, bp
, 0, sizeof(*tmp_info
)-1);
1704 old_info
->back
= cpu_to_be32(new_blk
->blkno
);
1707 * Link new block in after existing block.
1709 trace_xfs_da_link_after(args
);
1710 new_info
->forw
= old_info
->forw
;
1711 new_info
->back
= cpu_to_be32(old_blk
->blkno
);
1712 if (old_info
->forw
) {
1713 error
= xfs_da3_node_read(args
->trans
, dp
,
1714 be32_to_cpu(old_info
->forw
),
1715 -1, &bp
, args
->whichfork
);
1719 tmp_info
= bp
->b_addr
;
1720 ASSERT(tmp_info
->magic
== old_info
->magic
);
1721 ASSERT(be32_to_cpu(tmp_info
->back
) == old_blk
->blkno
);
1722 tmp_info
->back
= cpu_to_be32(new_blk
->blkno
);
1723 xfs_trans_log_buf(args
->trans
, bp
, 0, sizeof(*tmp_info
)-1);
1725 old_info
->forw
= cpu_to_be32(new_blk
->blkno
);
1728 xfs_trans_log_buf(args
->trans
, old_blk
->bp
, 0, sizeof(*tmp_info
) - 1);
1729 xfs_trans_log_buf(args
->trans
, new_blk
->bp
, 0, sizeof(*tmp_info
) - 1);
1734 * Unlink a block from a doubly linked list of blocks.
1736 STATIC
int /* error */
1738 struct xfs_da_state
*state
,
1739 struct xfs_da_state_blk
*drop_blk
,
1740 struct xfs_da_state_blk
*save_blk
)
1742 struct xfs_da_blkinfo
*drop_info
;
1743 struct xfs_da_blkinfo
*save_info
;
1744 struct xfs_da_blkinfo
*tmp_info
;
1745 struct xfs_da_args
*args
;
1750 * Set up environment.
1753 ASSERT(args
!= NULL
);
1754 save_info
= save_blk
->bp
->b_addr
;
1755 drop_info
= drop_blk
->bp
->b_addr
;
1756 ASSERT(save_blk
->magic
== XFS_DA_NODE_MAGIC
||
1757 save_blk
->magic
== XFS_DIR2_LEAFN_MAGIC
||
1758 save_blk
->magic
== XFS_ATTR_LEAF_MAGIC
);
1759 ASSERT(save_blk
->magic
== drop_blk
->magic
);
1760 ASSERT((be32_to_cpu(save_info
->forw
) == drop_blk
->blkno
) ||
1761 (be32_to_cpu(save_info
->back
) == drop_blk
->blkno
));
1762 ASSERT((be32_to_cpu(drop_info
->forw
) == save_blk
->blkno
) ||
1763 (be32_to_cpu(drop_info
->back
) == save_blk
->blkno
));
1766 * Unlink the leaf block from the doubly linked chain of leaves.
1768 if (be32_to_cpu(save_info
->back
) == drop_blk
->blkno
) {
1769 trace_xfs_da_unlink_back(args
);
1770 save_info
->back
= drop_info
->back
;
1771 if (drop_info
->back
) {
1772 error
= xfs_da3_node_read(args
->trans
, args
->dp
,
1773 be32_to_cpu(drop_info
->back
),
1774 -1, &bp
, args
->whichfork
);
1778 tmp_info
= bp
->b_addr
;
1779 ASSERT(tmp_info
->magic
== save_info
->magic
);
1780 ASSERT(be32_to_cpu(tmp_info
->forw
) == drop_blk
->blkno
);
1781 tmp_info
->forw
= cpu_to_be32(save_blk
->blkno
);
1782 xfs_trans_log_buf(args
->trans
, bp
, 0,
1783 sizeof(*tmp_info
) - 1);
1786 trace_xfs_da_unlink_forward(args
);
1787 save_info
->forw
= drop_info
->forw
;
1788 if (drop_info
->forw
) {
1789 error
= xfs_da3_node_read(args
->trans
, args
->dp
,
1790 be32_to_cpu(drop_info
->forw
),
1791 -1, &bp
, args
->whichfork
);
1795 tmp_info
= bp
->b_addr
;
1796 ASSERT(tmp_info
->magic
== save_info
->magic
);
1797 ASSERT(be32_to_cpu(tmp_info
->back
) == drop_blk
->blkno
);
1798 tmp_info
->back
= cpu_to_be32(save_blk
->blkno
);
1799 xfs_trans_log_buf(args
->trans
, bp
, 0,
1800 sizeof(*tmp_info
) - 1);
1804 xfs_trans_log_buf(args
->trans
, save_blk
->bp
, 0, sizeof(*save_info
) - 1);
1809 * Move a path "forward" or "!forward" one block at the current level.
1811 * This routine will adjust a "path" to point to the next block
1812 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1813 * Btree, including updating pointers to the intermediate nodes between
1814 * the new bottom and the root.
1818 struct xfs_da_state
*state
,
1819 struct xfs_da_state_path
*path
,
1824 struct xfs_da_state_blk
*blk
;
1825 struct xfs_da_blkinfo
*info
;
1826 struct xfs_da_intnode
*node
;
1827 struct xfs_da_args
*args
;
1828 struct xfs_da_node_entry
*btree
;
1829 struct xfs_da3_icnode_hdr nodehdr
;
1831 xfs_dablk_t blkno
= 0;
1834 struct xfs_inode
*dp
= state
->args
->dp
;
1836 trace_xfs_da_path_shift(state
->args
);
1839 * Roll up the Btree looking for the first block where our
1840 * current index is not at the edge of the block. Note that
1841 * we skip the bottom layer because we want the sibling block.
1844 ASSERT(args
!= NULL
);
1845 ASSERT(path
!= NULL
);
1846 ASSERT((path
->active
> 0) && (path
->active
< XFS_DA_NODE_MAXDEPTH
));
1847 level
= (path
->active
-1) - 1; /* skip bottom layer in path */
1848 for (blk
= &path
->blk
[level
]; level
>= 0; blk
--, level
--) {
1849 node
= blk
->bp
->b_addr
;
1850 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1851 btree
= dp
->d_ops
->node_tree_p(node
);
1853 if (forward
&& (blk
->index
< nodehdr
.count
- 1)) {
1855 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1857 } else if (!forward
&& (blk
->index
> 0)) {
1859 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1864 *result
= -ENOENT
; /* we're out of our tree */
1865 ASSERT(args
->op_flags
& XFS_DA_OP_OKNOENT
);
1870 * Roll down the edge of the subtree until we reach the
1871 * same depth we were at originally.
1873 for (blk
++, level
++; level
< path
->active
; blk
++, level
++) {
1875 * Read the next child block into a local buffer.
1877 error
= xfs_da3_node_read(args
->trans
, dp
, blkno
, -1, &bp
,
1883 * Release the old block (if it's dirty, the trans doesn't
1884 * actually let go) and swap the local buffer into the path
1885 * structure. This ensures failure of the above read doesn't set
1886 * a NULL buffer in an active slot in the path.
1889 xfs_trans_brelse(args
->trans
, blk
->bp
);
1893 info
= blk
->bp
->b_addr
;
1894 ASSERT(info
->magic
== cpu_to_be16(XFS_DA_NODE_MAGIC
) ||
1895 info
->magic
== cpu_to_be16(XFS_DA3_NODE_MAGIC
) ||
1896 info
->magic
== cpu_to_be16(XFS_DIR2_LEAFN_MAGIC
) ||
1897 info
->magic
== cpu_to_be16(XFS_DIR3_LEAFN_MAGIC
) ||
1898 info
->magic
== cpu_to_be16(XFS_ATTR_LEAF_MAGIC
) ||
1899 info
->magic
== cpu_to_be16(XFS_ATTR3_LEAF_MAGIC
));
1903 * Note: we flatten the magic number to a single type so we
1904 * don't have to compare against crc/non-crc types elsewhere.
1906 switch (be16_to_cpu(info
->magic
)) {
1907 case XFS_DA_NODE_MAGIC
:
1908 case XFS_DA3_NODE_MAGIC
:
1909 blk
->magic
= XFS_DA_NODE_MAGIC
;
1910 node
= (xfs_da_intnode_t
*)info
;
1911 dp
->d_ops
->node_hdr_from_disk(&nodehdr
, node
);
1912 btree
= dp
->d_ops
->node_tree_p(node
);
1913 blk
->hashval
= be32_to_cpu(btree
[nodehdr
.count
- 1].hashval
);
1917 blk
->index
= nodehdr
.count
- 1;
1918 blkno
= be32_to_cpu(btree
[blk
->index
].before
);
1920 case XFS_ATTR_LEAF_MAGIC
:
1921 case XFS_ATTR3_LEAF_MAGIC
:
1922 blk
->magic
= XFS_ATTR_LEAF_MAGIC
;
1923 ASSERT(level
== path
->active
-1);
1925 blk
->hashval
= xfs_attr_leaf_lasthash(blk
->bp
, NULL
);
1927 case XFS_DIR2_LEAFN_MAGIC
:
1928 case XFS_DIR3_LEAFN_MAGIC
:
1929 blk
->magic
= XFS_DIR2_LEAFN_MAGIC
;
1930 ASSERT(level
== path
->active
-1);
1932 blk
->hashval
= xfs_dir2_leafn_lasthash(args
->dp
,
1945 /*========================================================================
1947 *========================================================================*/
/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}

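/*
 * Worked example of the rolling hash above: for the 4-byte name "data" the
 * unrolled loop runs once with hash == 0 and computes
 *	('d' << 21) ^ ('a' << 14) ^ ('t' << 7) ^ ('a' << 0) ^ rol32(0, 28)
 * A second group of four characters would first rotate that result left by
 * 28 bits before XORing in the next bytes, so every character of a long
 * name keeps contributing to the final 32-bit value.
 */
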
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

2005 xfs_da_grow_inode_int(
2006 struct xfs_da_args
*args
,
2010 struct xfs_trans
*tp
= args
->trans
;
2011 struct xfs_inode
*dp
= args
->dp
;
2012 int w
= args
->whichfork
;
2013 xfs_rfsblock_t nblks
= dp
->i_d
.di_nblocks
;
2014 struct xfs_bmbt_irec map
, *mapp
;
2015 int nmap
, error
, got
, i
, mapi
;
2018 * Find a spot in the file space to put the new block.
2020 error
= xfs_bmap_first_unused(tp
, dp
, count
, bno
, w
);
2025 * Try mapping it in one filesystem block.
2028 ASSERT(args
->firstblock
!= NULL
);
2029 error
= xfs_bmapi_write(tp
, dp
, *bno
, count
,
2030 xfs_bmapi_aflag(w
)|XFS_BMAPI_METADATA
|XFS_BMAPI_CONTIG
,
2031 args
->firstblock
, args
->total
, &map
, &nmap
,
2040 } else if (nmap
== 0 && count
> 1) {
2045 * If we didn't get it and the block might work if fragmented,
2046 * try without the CONTIG flag. Loop until we get it all.
2048 mapp
= kmem_alloc(sizeof(*mapp
) * count
, KM_SLEEP
);
2049 for (b
= *bno
, mapi
= 0; b
< *bno
+ count
; ) {
2050 nmap
= MIN(XFS_BMAP_MAX_NMAP
, count
);
2051 c
= (int)(*bno
+ count
- b
);
2052 error
= xfs_bmapi_write(tp
, dp
, b
, c
,
2053 xfs_bmapi_aflag(w
)|XFS_BMAPI_METADATA
,
2054 args
->firstblock
, args
->total
,
2055 &mapp
[mapi
], &nmap
, args
->dfops
);
2061 b
= mapp
[mapi
- 1].br_startoff
+
2062 mapp
[mapi
- 1].br_blockcount
;
2070 * Count the blocks we got, make sure it matches the total.
2072 for (i
= 0, got
= 0; i
< mapi
; i
++)
2073 got
+= mapp
[i
].br_blockcount
;
2074 if (got
!= count
|| mapp
[0].br_startoff
!= *bno
||
2075 mapp
[mapi
- 1].br_startoff
+ mapp
[mapi
- 1].br_blockcount
!=
2081 /* account for newly allocated blocks in reserved blocks total */
2082 args
->total
-= dp
->i_d
.di_nblocks
- nblks
;
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

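/*
 * For directories the search for free space starts at geo->leafblk, so new
 * node and leaf blocks always land in the portion of the file's logical
 * address space reserved for them, separate from the data block and
 * freespace block ranges (see the blkno assertions in the split path).
 */
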
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
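	/*
	 * At this point dead_level and dead_hash describe the block that was
	 * just copied over the dead buffer; they are what the walk below uses
	 * to find the parent entry that still points at the old (last) block
	 * number.
	 */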
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t	*dp;
	int		done, error, w, count;
	xfs_trans_t	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, args->firstblock,
				    args->dfops, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
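/*
 * Note: the retry loop in xfs_da_shrink_inode() above re-runs xfs_bunmapi()
 * after xfs_da3_swap_lastblock() has moved the last block into the dead
 * block's slot, since removing the (new) last block cannot require a bmap
 * btree split and so cannot hit that ENOSPC case again.
 */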
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
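/*
 * For example (illustrative values): with bno = 100 and count = 4, mappings
 * { br_startoff = 100, br_blockcount = 1 } and { br_startoff = 101,
 * br_blockcount = 3 } are contiguous and cover the range, so the function
 * returns non-zero; a gap (next br_startoff != off), a HOLESTARTBLOCK or a
 * DELAYSTARTBLOCK mapping makes it return 0.
 */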
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}

	return 0;
}
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
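/*
 * Note that the -1 "landed in a hole" return from xfs_dabuf_map() is not a
 * failure: the callers below treat it as "nothing to map" and turn it back
 * into 0 before returning.
 */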
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
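/*
 * Illustrative sketch (not a real caller): a typical metadata read goes
 * through xfs_da_read_buf() with no pre-computed mapping (mappedbno == -1)
 * and a verifier for the block type being read, roughly:
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK, ops);
 *
 * The names tp, dp, blkno, bp and ops are placeholders for whatever the
 * caller has in scope.
 */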
/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}