// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_icreate_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
/*
 * Lookup a record by ino in the btree given by cur.
 */
int					/* error */
xfs_inobt_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	xfs_lookup_t		dir,	/* <=, >=, == */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_holemask = 0;
	cur->bc_rec.i.ir_count = 0;
	cur->bc_rec.i.ir_freecount = 0;
	cur->bc_rec.i.ir_free = 0;
	return xfs_btree_lookup(cur, dir, stat);
}
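
/*
 * Illustrative usage (a sketch of the pattern used throughout this file,
 * with error handling elided): a lookup positions the cursor, and a
 * subsequent xfs_inobt_get_rec() reads the record it points at.
 *
 *	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
 *	if (!error && stat == 1)
 *		error = xfs_inobt_get_rec(cur, &rec, &stat);
 *
 * On success *stat is 1; *stat == 0 means no record matched the lookup.
 */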
/*
 * Update the record referred to by cur to the value given.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_inobt_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_inobt_rec_incore_t	*irec)	/* btree record */
{
	union xfs_btree_rec	rec;

	rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
		rec.inobt.ir_u.sp.ir_count = irec->ir_count;
		rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
	}
	rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
	return xfs_btree_update(cur, &rec);
}
/* Convert on-disk btree record to incore inobt record. */
void
xfs_inobt_btrec_to_irec(
	struct xfs_mount		*mp,
	union xfs_btree_rec		*rec,
	struct xfs_inobt_rec_incore	*irec)
{
	irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
		irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
		irec->ir_count = rec->inobt.ir_u.sp.ir_count;
		irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
	} else {
		/*
		 * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
		 * values for full inode chunks.
		 */
		irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
		irec->ir_count = XFS_INODES_PER_CHUNK;
		irec->ir_freecount =
				be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
	}
	irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
}
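
/*
 * Worked example of the sparse record fields: a chunk spans
 * XFS_INODES_PER_CHUNK (64) inodes and the 16-bit holemask describes it
 * at XFS_INODES_PER_HOLEMASK_BIT (64 / 16 = 4) inodes per bit, a 1 bit
 * marking a hole.  A record with ir_holemask == 0xff00 is missing its
 * upper 32 inodes, so ir_count is 32 and only the low 32 bits of
 * ir_free describe allocatable inodes.
 */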
/*
 * Get the data from the pointed-to record.
 */
int
xfs_inobt_get_rec(
	struct xfs_btree_cur		*cur,
	struct xfs_inobt_rec_incore	*irec,
	int				*stat)
{
	struct xfs_mount		*mp = cur->bc_mp;
	xfs_agnumber_t			agno = cur->bc_ag.agno;
	union xfs_btree_rec		*rec;
	int				error;
	uint64_t			realfree;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || *stat == 0)
		return error;

	xfs_inobt_btrec_to_irec(mp, rec, irec);

	if (!xfs_verify_agino(mp, agno, irec->ir_startino))
		goto out_bad_rec;
	if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
	    irec->ir_count > XFS_INODES_PER_CHUNK)
		goto out_bad_rec;
	if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
		goto out_bad_rec;

	/* if there are no holes, return the first available offset */
	if (!xfs_inobt_issparse(irec->ir_holemask))
		realfree = irec->ir_free;
	else
		realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
	if (hweight64(realfree) != irec->ir_freecount)
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Inode BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
	xfs_warn(mp,
"start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
		irec->ir_startino, irec->ir_count, irec->ir_freecount,
		irec->ir_free, irec->ir_holemask);
	return -EFSCORRUPTED;
}
/*
 * Insert a single inobt record. Cursor must already point to desired location.
 */
int
xfs_inobt_insert_rec(
	struct xfs_btree_cur	*cur,
	uint16_t		holemask,
	uint8_t			count,
	int32_t			freecount,
	xfs_inofree_t		free,
	int			*stat)
{
	cur->bc_rec.i.ir_holemask = holemask;
	cur->bc_rec.i.ir_count = count;
	cur->bc_rec.i.ir_freecount = freecount;
	cur->bc_rec.i.ir_free = free;
	return xfs_btree_insert(cur, stat);
}
/*
 * Insert records describing a newly allocated inode chunk into the inobt.
 */
STATIC int
xfs_inobt_insert(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agino_t		newino,
	xfs_agino_t		newlen,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;
	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
	xfs_agino_t		thisino;
	int			i;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);

	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);

		error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
					     XFS_INODES_PER_CHUNK,
					     XFS_INODES_PER_CHUNK,
					     XFS_INOBT_ALL_FREE, &i);
		if (error) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);

	return 0;
}
/*
 * Verify that the number of free inodes in the AGI is correct.
 */
#ifdef DEBUG
STATIC int
xfs_check_agi_freecount(
	struct xfs_btree_cur	*cur,
	struct xfs_agi		*agi)
{
	if (cur->bc_nlevels == 1) {
		xfs_inobt_rec_incore_t rec;
		int		freecount = 0;
		int		error;
		int		i;

		error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
		if (error)
			return error;

		do {
			error = xfs_inobt_get_rec(cur, &rec, &i);
			if (error)
				return error;

			if (i) {
				freecount += rec.ir_freecount;
				error = xfs_btree_increment(cur, 0, &i);
				if (error)
					return error;
			}
		} while (i == 1);

		if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
			ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
	}
	return 0;
}
#else
#define xfs_check_agi_freecount(cur, agi)	0
#endif
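
/*
 * Because the non-DEBUG build compiles the check away to 0, callers can
 * invoke it unconditionally, e.g.:
 *
 *	error = xfs_check_agi_freecount(cur, agi);
 *	if (error)
 *		goto error0;
 */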
/*
 * Initialise a new set of inodes. When called without a transaction context
 * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
 * than logging them (which in a transaction context puts them into the AIL
 * for writeback rather than the xfsbufd queue).
 */
int
xfs_ialloc_inode_init(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct list_head	*buffer_list,
	int			icount,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		length,
	unsigned int		gen)
{
	struct xfs_buf		*fbuf;
	struct xfs_dinode	*free;
	int			nbufs;
	int			version;
	int			i, j;
	xfs_daddr_t		d;
	xfs_ino_t		ino = 0;
	int			error;

	/*
	 * Loop over the new block(s), filling in the inodes. For small block
	 * sizes, manipulate the inodes in buffers which are multiples of the
	 * block size.
	 */
	nbufs = length / M_IGEO(mp)->blocks_per_cluster;

	/*
	 * Figure out what version number to use in the inodes we create. If
	 * the superblock version has caught up to the one that supports the new
	 * inode format, then use the new inode version. Otherwise use the old
	 * version so that old kernels will continue to be able to use the file
	 * system.
	 *
	 * For v3 inodes, we also need to write the inode number into the inode,
	 * so calculate the first inode number of the chunk here as
	 * XFS_AGB_TO_AGINO() only works within a filesystem block, not
	 * across multiple filesystem blocks (such as a cluster) and so cannot
	 * be used in the cluster buffer loop below.
	 *
	 * Further, because we are writing the inode directly into the buffer
	 * and calculating a CRC on the entire inode, we have to log the entire
	 * inode so that the entire range the CRC covers is present in the log.
	 * That means for v3 inode we log the entire buffer rather than just the
	 * inode cores.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		version = 3;
		ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));

		/*
		 * log the initialisation that is about to take place as a
		 * logical operation. This means the transaction does not
		 * need to log the physical changes to the inode buffers as log
		 * recovery will know what initialisation is actually needed.
		 * Hence we only need to log the buffers as "ordered" buffers so
		 * they track in the AIL as if they were physically logged.
		 */
		if (tp)
			xfs_icreate_log(tp, agno, agbno, icount,
					mp->m_sb.sb_inodesize, length, gen);
	} else
		version = 2;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
				(j * M_IGEO(mp)->blocks_per_cluster));
		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
				mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
				XBF_UNMAPPED, &fbuf);
		if (error)
			return error;

		/* Initialize the inode buffers and log them appropriately. */
		fbuf->b_ops = &xfs_inode_buf_ops;
		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
		for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
			int	ioffset = i << mp->m_sb.sb_inodelog;
			uint	isize = XFS_DINODE_SIZE(&mp->m_sb);

			free = xfs_make_iptr(mp, fbuf, i);
			free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
			free->di_version = version;
			free->di_gen = cpu_to_be32(gen);
			free->di_next_unlinked = cpu_to_be32(NULLAGINO);

			if (version == 3) {
				free->di_ino = cpu_to_be64(ino);
				ino++;
				uuid_copy(&free->di_uuid,
					  &mp->m_sb.sb_meta_uuid);
				xfs_dinode_calc_crc(mp, free);
			} else if (tp) {
				/* just log the inode core */
				xfs_trans_log_buf(tp, fbuf, ioffset,
						  ioffset + isize - 1);
			}
		}

		if (tp) {
			/*
			 * Mark the buffer as an inode allocation buffer so it
			 * sticks in AIL at the point of this allocation
			 * transaction. This ensures that they are on disk before
			 * the tail of the log can be moved past this
			 * transaction (i.e. by preventing relogging from moving
			 * it forward in the log).
			 */
			xfs_trans_inode_alloc_buf(tp, fbuf);
			if (version == 3) {
				/*
				 * Mark the buffer as ordered so that they are
				 * not physically logged in the transaction but
				 * still tracked in the AIL as part of the
				 * transaction and pin the log appropriately.
				 */
				xfs_trans_ordered_buf(tp, fbuf);
			}
		} else {
			fbuf->b_flags |= XBF_DONE;
			xfs_buf_delwri_queue(fbuf, buffer_list);
			xfs_buf_relse(fbuf);
		}
	}
	return 0;
}
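
/*
 * Example geometry (illustrative numbers, not a requirement): with
 * 4096 byte blocks and 512 byte inodes, sb_inopblock is 8 and a
 * 64-inode chunk spans 8 filesystem blocks.  Given an 8k inode cluster,
 * blocks_per_cluster is 2 and inodes_per_cluster is 16, so the loop
 * above writes nbufs = 8 / 2 = 4 cluster buffers and initialises 16
 * inodes in each.
 */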
/*
 * Align startino and allocmask for a recently allocated sparse chunk such that
 * they are fit for insertion (or merge) into the on-disk inode btrees.
 *
 * Background:
 *
 * When enabled, sparse inode support increases the inode alignment from cluster
 * size to inode chunk size. This means that the minimum range between two
 * non-adjacent inode records in the inobt is large enough for a full inode
 * record. This allows for cluster sized, cluster aligned block allocation
 * without need to worry about whether the resulting inode record overlaps with
 * another record in the tree. Without this basic rule, we would have to deal
 * with the consequences of overlap by potentially undoing recent allocations in
 * the inode allocation codepath.
 *
 * Because of this alignment rule (which is enforced on mount), there are two
 * inobt possibilities for newly allocated sparse chunks. One is that the
 * aligned inode record for the chunk covers a range of inodes not already
 * covered in the inobt (i.e., it is safe to insert a new sparse record). The
 * other is that a record already exists at the aligned startino that considers
 * the newly allocated range as sparse. In the latter case, record content is
 * merged in hope that sparse inode chunks fill to full chunks over time.
 */
STATIC void
xfs_align_sparse_ino(
	struct xfs_mount	*mp,
	xfs_agino_t		*startino,
	uint16_t		*allocmask)
{
	xfs_agblock_t		agbno;
	xfs_agblock_t		mod;
	int			offset;

	agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
	mod = agbno % mp->m_sb.sb_inoalignmt;
	if (!mod)
		return;

	/* calculate the inode offset and align startino */
	offset = XFS_AGB_TO_AGINO(mp, mod);
	*startino -= offset;

	/*
	 * Since startino has been aligned down, left shift allocmask such that
	 * it continues to represent the same physical inodes relative to the
	 * new startino.
	 */
	*allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
}
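
/*
 * Worked example (illustrative geometry): with sb_inopblock = 8 and
 * sb_inoalignmt = 16, a sparse sub-chunk allocated at agbno 20 gives
 * mod = 20 % 16 = 4 blocks, i.e. offset = 4 * 8 = 32 inodes.  startino
 * is pulled back by 32 inodes to the aligned chunk boundary and the
 * allocmask is shifted left by 32 / 4 = 8 holemask bits so that it
 * still covers the same physical inodes within the aligned record.
 */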
/*
 * Determine whether the source inode record can merge into the target. Both
 * records must be sparse, the inode ranges must match and there must be no
 * allocation overlap between the records.
 */
STATIC bool
__xfs_inobt_can_merge(
	struct xfs_inobt_rec_incore	*trec,	/* tgt record */
	struct xfs_inobt_rec_incore	*srec)	/* src record */
{
	uint64_t			talloc;
	uint64_t			salloc;

	/* records must cover the same inode range */
	if (trec->ir_startino != srec->ir_startino)
		return false;

	/* both records must be sparse */
	if (!xfs_inobt_issparse(trec->ir_holemask) ||
	    !xfs_inobt_issparse(srec->ir_holemask))
		return false;

	/* both records must track some inodes */
	if (!trec->ir_count || !srec->ir_count)
		return false;

	/* can't exceed capacity of a full record */
	if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
		return false;

	/* verify there is no allocation overlap */
	talloc = xfs_inobt_irec_to_allocmask(trec);
	salloc = xfs_inobt_irec_to_allocmask(srec);
	if (talloc & salloc)
		return false;

	return true;
}
/*
 * Merge the source inode record into the target. The caller must call
 * __xfs_inobt_can_merge() to ensure the merge is valid.
 */
STATIC void
__xfs_inobt_rec_merge(
	struct xfs_inobt_rec_incore	*trec,	/* target */
	struct xfs_inobt_rec_incore	*srec)	/* src */
{
	ASSERT(trec->ir_startino == srec->ir_startino);

	/* combine the counts */
	trec->ir_count += srec->ir_count;
	trec->ir_freecount += srec->ir_freecount;

	/*
	 * Merge the holemask and free mask. For both fields, 0 bits refer to
	 * allocated inodes. We combine the allocated ranges with bitwise AND.
	 */
	trec->ir_holemask &= srec->ir_holemask;
	trec->ir_free &= srec->ir_free;
}
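
/*
 * Merge example: a target with holemask 0xff00 (low 32 inodes present)
 * and a source with holemask 0x00ff (high 32 inodes present) combine to
 * 0xff00 & 0x00ff == 0, a fully populated chunk, with the counts
 * summing to 64.  ANDing the free masks works the same way because each
 * record keeps ir_free bits set across its own holes, so only the 0
 * (in-use) bits of either side survive the merge.
 */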
/*
 * Insert a new sparse inode chunk into the associated inode btree. The inode
 * record for the sparse chunk is pre-aligned to a startino that should match
 * any pre-existing sparse inode record in the tree. This allows sparse chunks
 * to fill over time.
 *
 * This function supports two modes of handling preexisting records depending on
 * the merge flag. If merge is true, the provided record is merged with the
 * existing record and updated in place. The merged record is returned in nrec.
 * If merge is false, an existing record is replaced with the provided record.
 * If no preexisting record exists, the provided record is always inserted.
 *
 * It is considered corruption if a merge is requested and not possible. Given
 * the sparse inode alignment constraints, this should never happen.
 */
STATIC int
xfs_inobt_insert_sprec(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	int				btnum,
	struct xfs_inobt_rec_incore	*nrec,	/* in/out: new/merged rec. */
	bool				merge)	/* merge or replace */
{
	struct xfs_btree_cur		*cur;
	struct xfs_agi			*agi = agbp->b_addr;
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	int				error;
	int				i;
	struct xfs_inobt_rec_incore	rec;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);

	/* the new record is pre-aligned so we know where to look */
	error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	/* if nothing there, insert a new record and return */
	if (i == 0) {
		error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
					     nrec->ir_count, nrec->ir_freecount,
					     nrec->ir_free, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		goto out;
	}

	/*
	 * A record exists at this startino. Merge or replace the record
	 * depending on what we've been asked to do.
	 */
	if (merge) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}
		if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		/*
		 * This should never fail. If we have coexisting records that
		 * cannot merge, something is seriously wrong.
		 */
		if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
			error = -EFSCORRUPTED;
			goto error;
		}

		trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
					 rec.ir_holemask, nrec->ir_startino,
					 nrec->ir_holemask);

		/* merge to nrec to output the updated record */
		__xfs_inobt_rec_merge(nrec, &rec);

		trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
					  nrec->ir_holemask);

		error = xfs_inobt_rec_check_count(mp, nrec);
		if (error)
			goto error;
	}

	error = xfs_inobt_update(cur, nrec);
	if (error)
		goto error;

out:
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;
error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Returns 0 if inodes were allocated in this AG; 1 if there was no space
 * in this AG; or the usual negative error code.
 */
STATIC int
xfs_ialloc_ag_alloc(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi;
	struct xfs_alloc_arg	args;
	xfs_agnumber_t		agno;
	int			error;
	xfs_agino_t		newino;		/* new first inode's number */
	xfs_agino_t		newlen;		/* new number of inodes */
	int			isaligned = 0;	/* inode allocation at stripe */
						/* unit boundary */
	/* init. to full chunk */
	uint16_t		allocmask = (uint16_t) -1;
	struct xfs_inobt_rec_incore rec;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(tp->t_mountp);
	int			do_sparse = 0;

	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.fsbno = NULLFSBLOCK;
	args.oinfo = XFS_RMAP_OINFO_INODES;

#ifdef DEBUG
	/* randomly do sparse inode allocations */
	if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks)
		do_sparse = prandom_u32() & 1;
#endif

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = igeo->ialloc_inos;
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&args.mp->m_icount) + newlen >
							igeo->maxicount)
		return -ENOSPC;
	args.minlen = args.maxlen = igeo->ialloc_blks;
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = agbp->b_addr;
	newino = be32_to_cpu(agi->agi_newino);
	agno = be32_to_cpu(agi->agi_seqno);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
		     igeo->ialloc_blks;
	if (do_sparse)
		goto sparse_alloc;
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.prod = 1;

		/*
		 * We need to take into account alignment here to ensure that
		 * we don't modify the free list if we fail to have an exact
		 * block. If we don't have an exact match, and every other
		 * allocation attempt fails, we'll end up cancelling a dirty
		 * transaction and shutting down.
		 *
		 * For an exact allocation, alignment must be 1,
		 * however we need to take cluster alignment into account when
		 * fixing up the freelist. Use the minalignslop field to
		 * indicate that extra blocks might be required for alignment,
		 * but not to use them in the actual exact allocation.
		 */
		args.alignment = 1;
		args.minalignslop = igeo->cluster_align - 1;

		/* Allow space for the inode btree to split. */
		args.minleft = igeo->inobt_maxlevels;
		if ((error = xfs_alloc_vextent(&args)))
			return error;

		/*
		 * This request might have dirtied the transaction if the AG can
		 * satisfy the request, but the exact block was not available.
		 * If the allocation did fail, subsequent requests will relax
		 * the exact agbno requirement and increase the alignment
		 * instead. It is critical that the total size of the request
		 * (len + alignment + slop) does not increase from this point
		 * on, so reset minalignslop to ensure it is not included in
		 * subsequent requests.
		 */
		args.minalignslop = 0;
	}

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (igeo->ialloc_align) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else
			args.alignment = igeo->cluster_align;
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = igeo->inobt_maxlevels;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = igeo->cluster_align;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * Finally, try a sparse allocation if the filesystem supports it and
	 * the sparse allocation length is smaller than a full chunk.
	 */
	if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
	    igeo->ialloc_min_blks < igeo->ialloc_blks &&
	    args.fsbno == NULLFSBLOCK) {
sparse_alloc:
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
		args.alignment = args.mp->m_sb.sb_spino_align;
		args.prod = 1;

		args.minlen = igeo->ialloc_min_blks;
		args.maxlen = args.minlen;

		/*
		 * The inode record will be aligned to full chunk size. We must
		 * prevent sparse allocation from AG boundaries that result in
		 * invalid inode records, such as records that start at agbno 0
		 * or extend beyond the AG.
		 *
		 * Set min agbno to the first aligned, non-zero agbno and max to
		 * the last aligned agbno that is at least one full chunk from
		 * the end of the AG.
		 */
		args.min_agbno = args.mp->m_sb.sb_inoalignmt;
		args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
					    args.mp->m_sb.sb_inoalignmt) -
				 igeo->ialloc_blks;

		error = xfs_alloc_vextent(&args);
		if (error)
			return error;

		newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
		ASSERT(newlen <= XFS_INODES_PER_CHUNK);
		allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
	}

	if (args.fsbno == NULLFSBLOCK)
		return 1;

	ASSERT(args.len == args.minlen);

	/*
	 * Stamp and write the inode buffers.
	 *
	 * Seed the new inode cluster with a random generation number. This
	 * prevents short-term reuse of generation numbers if a chunk is
	 * freed and then immediately reallocated. We use random numbers
	 * rather than a linear progression to prevent the next generation
	 * number from being easily guessable.
	 */
	error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
			args.agbno, args.len, prandom_u32());

	if (error)
		return error;
	/*
	 * Convert the results.
	 */
	newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);

	if (xfs_inobt_issparse(~allocmask)) {
		/*
		 * We've allocated a sparse chunk. Align the startino and mask.
		 */
		xfs_align_sparse_ino(args.mp, &newino, &allocmask);

		rec.ir_startino = newino;
		rec.ir_holemask = ~allocmask;
		rec.ir_count = newlen;
		rec.ir_freecount = newlen;
		rec.ir_free = XFS_INOBT_ALL_FREE;

		/*
		 * Insert the sparse record into the inobt and allow for a merge
		 * if necessary. If a merge does occur, rec is updated to the
		 * merged record.
		 */
		error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
					       &rec, true);
		if (error == -EFSCORRUPTED) {
			xfs_alert(args.mp,
	"invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
				  XFS_AGINO_TO_INO(args.mp, agno,
						   rec.ir_startino),
				  rec.ir_holemask, rec.ir_count);
			xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
		}
		if (error)
			return error;

		/*
		 * We can't merge the part we've just allocated as for the inobt
		 * due to finobt semantics. The original record may or may not
		 * exist independent of whether physical inodes exist in this
		 * sparse chunk.
		 *
		 * We must update the finobt record based on the inobt record.
		 * rec contains the fully merged and up to date inobt record
		 * from the previous call. Set merge false to replace any
		 * existing record with this one.
		 */
		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
						       XFS_BTNUM_FINO, &rec,
						       false);
			if (error)
				return error;
		}
	} else {
		/* full chunk - insert new records to both btrees */
		error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
					 XFS_BTNUM_INO);
		if (error)
			return error;

		if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
			error = xfs_inobt_insert(args.mp, tp, agbp, newino,
						 newlen, XFS_BTNUM_FINO);
			if (error)
				return error;
		}
	}

	/*
	 * Update AGI counts and newino.
	 */
	be32_add_cpu(&agi->agi_count, newlen);
	be32_add_cpu(&agi->agi_freecount, newlen);
	pag = agbp->b_pag;
	pag->pagi_freecount += newlen;
	pag->pagi_count += newlen;
	agi->agi_newino = cpu_to_be32(newino);

	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	return 0;
}
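
/*
 * Sparse AG-boundary example (illustrative numbers): with
 * sb_inoalignmt = 16, sb_agblocks = 1000 and an 8 block chunk, the
 * sparse allocation above is clamped to min_agbno = 16 and
 * max_agbno = round_down(1000, 16) - 8 = 984, so no resulting inode
 * record can start at agbno 0 or extend past the end of the AG.
 */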
STATIC xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor >= mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}
/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and the mode.  Return the allocation group buffer.
 */
STATIC xfs_agnumber_t
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	umode_t		mode)		/* bits set to indicate file type */
{
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */
	int		error;

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}

	ASSERT(pagno < agcount);

	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes, exactly.
	 * Instead, we include whether there is a need to allocate inodes
	 * to mean that blocks must be allocated for them,
	 * if none are currently free.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	for (;;) {
		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto nextag;
		}

		if (!pag->pagi_init) {
			error = xfs_ialloc_pagi_init(mp, tp, agno);
			if (error)
				goto nextag;
		}

		if (pag->pagi_freecount) {
			xfs_perag_put(pag);
			return agno;
		}

		if (!pag->pagf_init) {
			error = xfs_alloc_pagf_init(mp, tp, agno, flags);
			if (error)
				goto nextag;
		}

		/*
		 * Check that there is enough free space for the file plus a
		 * chunk of inodes if we need to allocate some. If this is the
		 * first pass across the AGs, take into account the potential
		 * space needed for alignment of inode chunks when checking the
		 * longest contiguous free space in the AG - this prevents us
		 * from getting ENOSPC because we have free space larger than
		 * ialloc_blks but alignment constraints prevent us from using
		 * it.
		 *
		 * If we can't find an AG with space for full alignment slack to
		 * be taken into account, we must be near ENOSPC in all AGs.
		 * Hence we don't include alignment for the second pass and so
		 * if we fail allocation due to alignment issues then it is most
		 * likely a real ENOSPC condition.
		 */
		ineed = M_IGEO(mp)->ialloc_min_blks;
		if (flags && ineed > 1)
			ineed += M_IGEO(mp)->cluster_align;
		longest = pag->pagf_longest;
		if (!longest)
			longest = pag->pagf_flcount > 0;

		if (pag->pagf_freeblks >= needspace + ineed &&
		    longest >= ineed) {
			xfs_perag_put(pag);
			return agno;
		}
nextag:
		xfs_perag_put(pag);
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp))
			return NULLAGNUMBER;
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0)
				return NULLAGNUMBER;
			flags = 0;
		}
	}
}
/*
 * Try to retrieve the next record to the left/right from the current one.
 */
STATIC int
xfs_ialloc_next_rec(
	struct xfs_btree_cur	*cur,
	xfs_inobt_rec_incore_t	*rec,
	int			*done,
	int			left)
{
	int			error;
	int			i;

	if (left)
		error = xfs_btree_decrement(cur, 0, &i);
	else
		error = xfs_btree_increment(cur, 0, &i);

	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
			return -EFSCORRUPTED;
	}

	return 0;
}
STATIC int
xfs_ialloc_get_rec(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		agino,
	xfs_inobt_rec_incore_t	*rec,
	int			*done)
{
	int			error;
	int			i;

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	*done = !i;
	if (i) {
		error = xfs_inobt_get_rec(cur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
			return -EFSCORRUPTED;
	}

	return 0;
}
/*
 * Return the offset of the first free inode in the record. If the inode chunk
 * is sparsely allocated, we convert the record holemask to inode granularity
 * and mask off the unallocated regions from the inode free mask.
 */
STATIC int
xfs_inobt_first_free_inode(
	struct xfs_inobt_rec_incore	*rec)
{
	xfs_inofree_t			realfree;

	/* if there are no holes, return the first available offset */
	if (!xfs_inobt_issparse(rec->ir_holemask))
		return xfs_lowbit64(rec->ir_free);

	realfree = xfs_inobt_irec_to_allocmask(rec);
	realfree &= rec->ir_free;

	return xfs_lowbit64(realfree);
}
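
/*
 * Example: a sparse record with ir_holemask 0x00ff has holes over
 * inodes 0-31, so xfs_inobt_irec_to_allocmask() yields
 * 0xffffffff00000000.  Masking ir_free with it discards the free bits
 * that sit over unallocated regions before xfs_lowbit64() picks the
 * first genuinely allocatable inode.
 */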
/*
 * Allocate an inode using the inobt-only algorithm.
 */
STATIC int
xfs_dialloc_ag_inobt(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agi		*agi = agbp->b_addr;
	xfs_agnumber_t		agno = be32_to_cpu(agi->agi_seqno);
	xfs_agnumber_t		pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t		pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_perag	*pag = agbp->b_pag;
	struct xfs_btree_cur	*cur, *tcur;
	struct xfs_inobt_rec_incore rec, trec;
	xfs_ino_t		ino;
	int			error;
	int			offset;
	int			i, j;
	int			searchdistance = 10;

	ASSERT(pag->pagi_init);
	ASSERT(pag->pagi_inodeok);
	ASSERT(pag->pagi_freecount > 0);

 restart_pagno:
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * If in the same AG as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		int		doneleft;	/* done, to the left */
		int		doneright;	/* done, to the right */

		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		error = xfs_inobt_get_rec(cur, &rec, &j);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, j != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}

		if (rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as the parent, done.
			 */
			goto alloc_inode;
		}


		/*
		 * In the same AG as parent, but parent's chunk is full.
		 */

		/* duplicate the cursor, search left & right simultaneously */
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;

		/*
		 * Skip to last blocks looked up if same parent inode.
		 */
		if (pagino != NULLAGINO &&
		    pag->pagl_pagino == pagino &&
		    pag->pagl_leftrec != NULLAGINO &&
		    pag->pagl_rightrec != NULLAGINO) {
			error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
						   &trec, &doneleft);
			if (error)
				goto error1;

			error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
						   &rec, &doneright);
			if (error)
				goto error1;
		} else {
			/* search left with tcur, back up 1 record */
			error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
			if (error)
				goto error1;

			/* search right with cur, go forward 1 record. */
			error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
			if (error)
				goto error1;
		}

		/*
		 * Loop until we find an inode chunk with a free inode.
		 */
		while (--searchdistance > 0 && (!doneleft || !doneright)) {
			int	useleft;  /* using left inode chunk this time */

			/* figure out the closer block if both are valid. */
			if (!doneleft && !doneright) {
				useleft = pagino -
				 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
				  rec.ir_startino - pagino;
			} else {
				useleft = !doneleft;
			}

			/* free inodes to the left? */
			if (useleft && trec.ir_freecount) {
				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
				cur = tcur;

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				rec = trec;
				goto alloc_inode;
			}

			/* free inodes to the right? */
			if (!useleft && rec.ir_freecount) {
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

				pag->pagl_leftrec = trec.ir_startino;
				pag->pagl_rightrec = rec.ir_startino;
				pag->pagl_pagino = pagino;
				goto alloc_inode;
			}

			/* get next record to check */
			if (useleft) {
				error = xfs_ialloc_next_rec(tcur, &trec,
								 &doneleft, 1);
			} else {
				error = xfs_ialloc_next_rec(cur, &rec,
								 &doneright, 0);
			}
			if (error)
				goto error1;
		}

		if (searchdistance <= 0) {
			/*
			 * Not in range - save last search
			 * location and allocate a new inode
			 */
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			pag->pagl_leftrec = trec.ir_startino;
			pag->pagl_rightrec = rec.ir_startino;
			pag->pagl_pagino = pagino;

		} else {
			/*
			 * We've reached the end of the btree. Because we
			 * are only searching a small chunk of the btree
			 * each search, there are obviously free inodes
			 * closer to the parent inode than we are now.
			 * Restart the search again.
			 */
			pag->pagl_pagino = NULLAGINO;
			pag->pagl_leftrec = NULLAGINO;
			pag->pagl_rightrec = NULLAGINO;
			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			goto restart_pagno;
		}
	}

	/*
	 * In a different AG from the parent.
	 * See if the most recently allocated block has any free.
	 */
	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			goto error0;

		if (i == 1) {
			error = xfs_inobt_get_rec(cur, &rec, &j);
			if (error)
				goto error0;

			if (j == 1 && rec.ir_freecount > 0) {
				/*
				 * The last chunk allocated in the group
				 * still has a free inode.
				 */
				goto alloc_inode;
			}
		}
	}

	/*
	 * None left in the last group, search the whole AG
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}

	for (;;) {
		error = xfs_inobt_get_rec(cur, &rec, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (rec.ir_freecount > 0)
			break;
		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
	}

alloc_inode:
	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	error = xfs_inobt_update(cur, &rec);
	if (error)
		goto error0;
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	pag->pagi_freecount--;

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Use the free inode btree to allocate an inode based on distance from the
 * parent. Note that the provided cursor may be deleted and replaced.
 */
STATIC int
xfs_dialloc_ag_finobt_near(
	xfs_agino_t			pagino,
	struct xfs_btree_cur		**ocur,
	struct xfs_inobt_rec_incore	*rec)
{
	struct xfs_btree_cur		*lcur = *ocur;	/* left search cursor */
	struct xfs_btree_cur		*rcur;	/* right search cursor */
	struct xfs_inobt_rec_incore	rrec;
	int				error;
	int				i, j;

	error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
	if (error)
		return error;

	if (i == 1) {
		error = xfs_inobt_get_rec(lcur, rec, &i);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
			return -EFSCORRUPTED;

		/*
		 * See if we've landed in the parent inode record. The finobt
		 * only tracks chunks with at least one free inode, so record
		 * existence is enough.
		 */
		if (pagino >= rec->ir_startino &&
		    pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
			return 0;
	}

	error = xfs_btree_dup_cursor(lcur, &rcur);
	if (error)
		return error;

	error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
	if (error)
		goto error_rcur;
	if (j == 1) {
		error = xfs_inobt_get_rec(rcur, &rrec, &j);
		if (error)
			goto error_rcur;
		if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
			error = -EFSCORRUPTED;
			goto error_rcur;
		}
	}

	if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
		error = -EFSCORRUPTED;
		goto error_rcur;
	}
	if (i == 1 && j == 1) {
		/*
		 * Both the left and right records are valid. Choose the closer
		 * inode chunk to the target.
		 */
		if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
		    (rrec.ir_startino - pagino)) {
			*rec = rrec;
			xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
			*ocur = rcur;
		} else {
			xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
		}
	} else if (j == 1) {
		/* only the right record is valid */
		*rec = rrec;
		xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
		*ocur = rcur;
	} else if (i == 1) {
		/* only the left record is valid */
		xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
	}

	return 0;

error_rcur:
	xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Use the free inode btree to find a free inode based on a newino hint. If
 * the hint is NULL, find the first free inode in the AG.
 */
STATIC int
xfs_dialloc_ag_finobt_newino(
	struct xfs_agi			*agi,
	struct xfs_btree_cur		*cur,
	struct xfs_inobt_rec_incore	*rec)
{
	int error;
	int i;

	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
					 XFS_LOOKUP_EQ, &i);
		if (error)
			return error;
		if (i == 1) {
			error = xfs_inobt_get_rec(cur, rec, &i);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
				return -EFSCORRUPTED;
			return 0;
		}
	}

	/*
	 * Find the first inode available in the AG.
	 */
	error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_inobt_get_rec(cur, rec, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	return 0;
}
/*
 * Update the inobt based on a modification made to the finobt. Also ensure that
 * the records from both trees are equivalent post-modification.
 */
STATIC int
xfs_dialloc_ag_update_inobt(
	struct xfs_btree_cur		*cur,	/* inobt cursor */
	struct xfs_inobt_rec_incore	*frec,	/* finobt record */
	int				offset) /* inode offset */
{
	struct xfs_inobt_rec_incore	rec;
	int				error;
	int				i;

	error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;

	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
		return -EFSCORRUPTED;
	ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);

	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;

	if (XFS_IS_CORRUPT(cur->bc_mp,
			   rec.ir_free != frec->ir_free ||
			   rec.ir_freecount != frec->ir_freecount))
		return -EFSCORRUPTED;

	return xfs_inobt_update(cur, &rec);
}
/*
 * Allocate an inode using the free inode btree, if available. Otherwise, fall
 * back to the inobt search algorithm.
 *
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
int
xfs_dialloc_ag(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_ino_t		parent,
	xfs_ino_t		*inop)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_agi			*agi = agbp->b_addr;
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	xfs_agnumber_t			pagno = XFS_INO_TO_AGNO(mp, parent);
	xfs_agino_t			pagino = XFS_INO_TO_AGINO(mp, parent);
	struct xfs_btree_cur		*cur;	/* finobt cursor */
	struct xfs_btree_cur		*icur;	/* inobt cursor */
	struct xfs_inobt_rec_incore	rec;
	xfs_ino_t			ino;
	int				error;
	int				offset;
	int				i;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);

	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_cur;

	/*
	 * The search algorithm depends on whether we're in the same AG as the
	 * parent. If so, find the closest available inode to the parent. If
	 * not, consider the agi hint or find the first free inode in the AG.
	 */
	if (agno == pagno)
		error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
	else
		error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
	if (error)
		goto error_cur;

	offset = xfs_inobt_first_free_inode(&rec);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);

	/*
	 * Modify or remove the finobt record.
	 */
	rec.ir_free &= ~XFS_INOBT_MASK(offset);
	rec.ir_freecount--;
	if (rec.ir_freecount)
		error = xfs_inobt_update(cur, &rec);
	else
		error = xfs_btree_delete(cur, &i);
	if (error)
		goto error_cur;

	/*
	 * The finobt has now been updated appropriately. We haven't updated the
	 * agi and superblock yet, so we can create an inobt cursor and validate
	 * the original freecount. If all is well, make the equivalent update to
	 * the inobt using the finobt record and offset information.
	 */
	icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;

	error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
	if (error)
		goto error_icur;

	/*
	 * Both trees have now been updated. We must update the perag and
	 * superblock before we can check the freecount for each btree.
	 */
	be32_add_cpu(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	agbp->b_pag->pagi_freecount--;

	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);

	error = xfs_check_agi_freecount(icur, agi);
	if (error)
		goto error_icur;
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error_icur;

	xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	*inop = ino;
	return 0;

error_icur:
	xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
error_cur:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
static int
xfs_dialloc_roll(
	struct xfs_trans	**tpp,
	struct xfs_buf		*agibp)
{
	struct xfs_trans	*tp = *tpp;
	struct xfs_dquot_acct	*dqinfo;
	int			error;

	/*
	 * Hold on to the agibp across the commit so no other allocation can
	 * come in and take the free inodes we just allocated for our caller.
	 */
	xfs_trans_bhold(tp, agibp);

	/*
	 * We want the quota changes to be associated with the next transaction,
	 * NOT this one. So, detach the dqinfo from this and attach it to the
	 * next transaction.
	 */
	dqinfo = tp->t_dqinfo;
	tp->t_dqinfo = NULL;

	error = xfs_trans_roll(&tp);

	/* Re-attach the quota info that we detached from prev trx. */
	tp->t_dqinfo = dqinfo;

	*tpp = tp;
	if (error)
		return error;
	xfs_trans_bjoin(tp, agibp);
	return 0;
}
/*
 * Select and prepare an AG for inode allocation.
 *
 * Mode is used to tell whether the new inode is a directory and hence where to
 * locate it.
 *
 * This function will ensure that the selected AG has free inodes available to
 * allocate from. The selected AGI will be returned locked to the caller, and it
 * will allocate more free inodes if required. If no free inodes are found or
 * can be allocated, no AGI will be returned.
 */
int
xfs_dialloc_select_ag(
	struct xfs_trans	**tpp,
	xfs_ino_t		parent,
	umode_t			mode,
	struct xfs_buf		**IO_agbp)
{
	struct xfs_mount	*mp = (*tpp)->t_mountp;
	struct xfs_buf		*agbp;
	xfs_agnumber_t		agno;
	int			error;
	bool			noroom = false;
	xfs_agnumber_t		start_agno;
	struct xfs_perag	*pag;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	bool			okalloc = true;

	*IO_agbp = NULL;

	/*
	 * We do not have an agbp, so select an initial allocation
	 * group for inode allocation.
	 */
	start_agno = xfs_ialloc_ag_select(*tpp, parent, mode);
	if (start_agno == NULLAGNUMBER)
		return 0;

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 *
	 * Read rough value of mp->m_icount by percpu_counter_read_positive,
	 * which will sacrifice the preciseness but improve the performance.
	 */
	if (igeo->maxicount &&
	    percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
							> igeo->maxicount) {
		noroom = true;
		okalloc = false;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes.  Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	agno = start_agno;
	for (;;) {
		pag = xfs_perag_get(mp, agno);
		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto nextag;
		}

		if (!pag->pagi_init) {
			error = xfs_ialloc_pagi_init(mp, *tpp, agno);
			if (error)
				goto out_error;
		}

		/*
		 * Do a first racy fast path check if this AG is usable.
		 */
		if (!pag->pagi_freecount && !okalloc)
			goto nextag;

		/*
		 * Then read in the AGI buffer and recheck with the AGI buffer
		 * lock held.
		 */
		error = xfs_ialloc_read_agi(mp, *tpp, agno, &agbp);
		if (error)
			goto out_error;

		if (pag->pagi_freecount) {
			xfs_perag_put(pag);
			goto found_ag;
		}

		if (!okalloc)
			goto nextag_relse_buffer;

		error = xfs_ialloc_ag_alloc(*tpp, agbp);
		if (error < 0) {
			xfs_trans_brelse(*tpp, agbp);

			if (error == -ENOSPC)
				error = 0;
			goto out_error;
		}

		if (error == 0) {
			/*
			 * We successfully allocated space for an inode cluster
			 * in this AG. Roll the transaction so that we can
			 * allocate one of the new inodes.
			 */
			ASSERT(pag->pagi_freecount > 0);
			xfs_perag_put(pag);

			error = xfs_dialloc_roll(tpp, agbp);
			if (error) {
				xfs_buf_relse(agbp);
				return error;
			}
			goto found_ag;
		}

nextag_relse_buffer:
		xfs_trans_brelse(*tpp, agbp);
nextag:
		xfs_perag_put(pag);
		if (++agno == mp->m_sb.sb_agcount)
			agno = 0;
		if (agno == start_agno)
			return noroom ? -ENOSPC : 0;
	}

out_error:
	xfs_perag_put(pag);
	return error;
found_ag:
	*IO_agbp = agbp;
	return 0;
}
/*
 * Free the blocks of an inode chunk. We must consider that the inode chunk
 * might be sparse and only free the regions that are allocated as part of the
 * chunk.
 */
STATIC void
xfs_difree_inode_chunk(
	struct xfs_trans		*tp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*rec)
{
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agblock_t			sagbno = XFS_AGINO_TO_AGBNO(mp,
							rec->ir_startino);
	int				startidx, endidx;
	int				nextbit;
	xfs_agblock_t			agbno;
	int				contigblk;
	DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);

	if (!xfs_inobt_issparse(rec->ir_holemask)) {
		/* not sparse, calculate extent info directly */
		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
				  M_IGEO(mp)->ialloc_blks,
				  &XFS_RMAP_OINFO_INODES);
		return;
	}

	/* holemask is only 16-bits (fits in an unsigned long) */
	ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
	holemask[0] = rec->ir_holemask;

	/*
	 * Find contiguous ranges of zeroes (i.e., allocated regions) in the
	 * holemask and convert the start/end index of each range to an extent.
	 * We start with the start and end index both pointing at the first 0 in
	 * the mask.
	 */
	startidx = endidx = find_first_zero_bit(holemask,
						XFS_INOBT_HOLEMASK_BITS);
	nextbit = startidx + 1;
	while (startidx < XFS_INOBT_HOLEMASK_BITS) {
		nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
					     nextbit);
		/*
		 * If the next zero bit is contiguous, update the end index of
		 * the current range and continue.
		 */
		if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
		    nextbit == endidx + 1) {
			endidx = nextbit;
			goto next;
		}

		/*
		 * nextbit is not contiguous with the current end index. Convert
		 * the current start/end to an extent and add it to the free
		 * list.
		 */
		agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
				  mp->m_sb.sb_inopblock;
		contigblk = ((endidx - startidx + 1) *
			     XFS_INODES_PER_HOLEMASK_BIT) /
			    mp->m_sb.sb_inopblock;

		ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
		ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
		xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
				  contigblk, &XFS_RMAP_OINFO_INODES);

		/* reset range to current bit and carry on... */
		startidx = endidx = nextbit;

next:
		nextbit++;
	}
}
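
/*
 * Conversion example (illustrative, sb_inopblock = 8): a zero-bit run
 * over holemask indices 4-7 covers inodes 16-31, so the loop above
 * frees agbno = sagbno + (4 * 4) / 8 = sagbno + 2 with
 * contigblk = ((7 - 4 + 1) * 4) / 8 = 2 blocks in a single extent.
 */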
STATIC int
xfs_difree_inobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agino_t			agino,
	struct xfs_icluster		*xic,
	struct xfs_inobt_rec_incore	*orec)
{
	struct xfs_agi			*agi = agbp->b_addr;
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				ilen;
	int				error;
	int				i;
	int				off;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
	ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));

	/*
	 * Initialize the cursor.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	/*
	 * Look for the entry describing this inode.
	 */
	if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
		xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error) {
		xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
			__func__, error);
		goto error0;
	}
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
	/*
	 * Mark the inode free & increment the count.
	 */
	rec.ir_free |= XFS_INOBT_MASK(off);
	rec.ir_freecount++;

	/*
	 * When an inode chunk is free, it becomes eligible for removal. Don't
	 * remove the chunk if the block size is large enough for multiple inode
	 * chunks (that might not be free).
	 */
	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
	    rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
		struct xfs_perag	*pag = agbp->b_pag;

		xic->deleted = true;
		xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
		xic->alloc = xfs_inobt_irec_to_allocmask(&rec);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 */
		ilen = rec.ir_freecount;
		be32_add_cpu(&agi->agi_count, -ilen);
		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		pag->pagi_freecount -= ilen - 1;
		pag->pagi_count -= ilen;
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		if ((error = xfs_btree_delete(cur, &i))) {
			xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
				__func__, error);
			goto error0;
		}

		xfs_difree_inode_chunk(tp, agno, &rec);
	} else {
		xic->deleted = false;

		error = xfs_inobt_update(cur, &rec);
		if (error) {
			xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
				__func__, error);
			goto error0;
		}

		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add_cpu(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		agbp->b_pag->pagi_freecount++;
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error0;

	*orec = rec;
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Free an inode in the free inode btree.
 */
STATIC int
xfs_difree_finobt(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agino_t			agino,
	struct xfs_inobt_rec_incore	*ibtrec) /* inobt record */
{
	struct xfs_agi			*agi = agbp->b_addr;
	xfs_agnumber_t			agno = be32_to_cpu(agi->agi_seqno);
	struct xfs_btree_cur		*cur;
	struct xfs_inobt_rec_incore	rec;
	int				offset = agino - ibtrec->ir_startino;
	int				error;
	int				i;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);

	error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
	if (error)
		goto error;
	if (i == 0) {
		/*
		 * If the record does not exist in the finobt, we must have just
		 * freed an inode in a previously fully allocated chunk. If not,
		 * something is out of sync.
		 */
		if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}

		error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
					     ibtrec->ir_count,
					     ibtrec->ir_freecount,
					     ibtrec->ir_free, &i);
		if (error)
			goto error;
		ASSERT(i == 1);

		goto out;
	}

	/*
	 * Read and update the existing record. We could just copy the ibtrec
	 * across here, but that would defeat the purpose of having redundant
	 * metadata. By making the modifications independently, we can catch
	 * corruptions that we wouldn't see if we just copied from one record
	 * to another.
	 */
	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		goto error;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	rec.ir_free |= XFS_INOBT_MASK(offset);
	rec.ir_freecount++;

	if (XFS_IS_CORRUPT(mp,
			   rec.ir_free != ibtrec->ir_free ||
			   rec.ir_freecount != ibtrec->ir_freecount)) {
		error = -EFSCORRUPTED;
		goto error;
	}

	/*
	 * The content of inobt records should always match between the inobt
	 * and finobt. The lifecycle of records in the finobt is different from
	 * the inobt in that the finobt only tracks records with at least one
	 * free inode. Hence, if all of the inodes are free and we aren't
	 * keeping inode chunks permanently on disk, remove the record.
	 * Otherwise, update the record with the new information.
	 *
	 * Note that we currently can't free chunks when the block size is large
	 * enough for multiple chunks. Leave the finobt record to remain in sync
	 * with the inobt.
	 */
	if (rec.ir_free == XFS_INOBT_ALL_FREE &&
	    mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		error = xfs_btree_delete(cur, &i);
		if (error)
			goto error;
		ASSERT(i == 1);
	} else {
		error = xfs_inobt_update(cur, &rec);
		if (error)
			goto error;
	}

out:
	error = xfs_check_agi_freecount(cur, agi);
	if (error)
		goto error;

	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Free disk inode.  Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_ino_t		inode,		/* inode to be freed */
	struct xfs_icluster	*xic)		/* cluster info if deleted */
{
	/* REFERENCED */
	xfs_agblock_t		agbno;	/* block number containing inode */
	struct xfs_buf		*agbp;	/* buffer for allocation group header */
	xfs_agino_t		agino;	/* allocation group inode number */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			error;	/* error return value */
	struct xfs_mount	*mp;	/* mount structure for filesystem */
	struct xfs_inobt_rec_incore rec;/* btree record */

	mp = tp->t_mountp;

	/*
	 * Break up inode number into its components.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
			__func__, agno, mp->m_sb.sb_agcount);
		ASSERT(0);
		return -EINVAL;
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino))  {
		xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
			__func__, (unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
		ASSERT(0);
		return -EINVAL;
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks)  {
		xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
			__func__, agbno, mp->m_sb.sb_agblocks);
		ASSERT(0);
		return -EINVAL;
	}
	/*
	 * Get the allocation group header.
	 */
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
			__func__, error);
		return error;
	}

	/*
	 * Fix up the inode allocation btree.
	 */
	error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
	if (error)
		goto error0;

	/*
	 * Fix up the free inode btree.
	 */
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
		if (error)
			goto error0;
	}

	return 0;

error0:
	return error;
}
STATIC int
xfs_imap_lookup(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	xfs_agblock_t		agbno,
	xfs_agblock_t		*chunk_agbno,
	xfs_agblock_t		*offset_agbno,
	int			flags)
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_alert(mp,
			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
			__func__, error, agno);
		return error;
	}

	/*
	 * Lookup the inode record for the given agino. If the record cannot be
	 * found, then it's an invalid inode number and we should abort. Once
	 * we have a record, we need to ensure it contains the inode number
	 * we are looking up.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (!error) {
		if (i)
			error = xfs_inobt_get_rec(cur, &rec, &i);
		if (!error && i == 0)
			error = -EINVAL;
	}

	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* check that the returned record contains the required inode */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
		return -EINVAL;

	/* for untrusted inodes check it is allocated first */
	if ((flags & XFS_IGET_UNTRUSTED) &&
	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
		return -EINVAL;

	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
	*offset_agbno = agbno - *chunk_agbno;
	return 0;
}
/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 */
int
xfs_imap(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	struct xfs_imap	*imap,	/* location map structure */
	uint		flags)	/* flags for inode btree lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	int		error;	/* error code */
	int		offset;	/* index of inode in its buffer */
	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);

	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/*
		 * Don't output diagnostic information for untrusted inodes
		 * as they can be invalid without implying corruption.
		 */
		if (flags & XFS_IGET_UNTRUSTED)
			return -EINVAL;
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_alert(mp,
				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
				__func__, agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_alert(mp,
		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
				__func__, (unsigned long long)agbno,
				(unsigned long)mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_alert(mp,
		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
				__func__, ino,
				XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return -EINVAL;
	}

	/*
	 * For bulkstat and handle lookups, we have an untrusted inode number
	 * that we have to verify is valid. We cannot do this just by reading
	 * the inode buffer as it may have been unlinked and removed leaving
	 * inodes in stale state on disk. Hence we have to do a btree lookup
	 * in all cases where an untrusted inode number is passed.
	 */
	if (flags & XFS_IGET_UNTRUSTED) {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
		goto out_map;
	}

	/*
	 * If the inode cluster size is the same as the blocksize or
	 * smaller we get to the buffer by simple arithmetic.
	 */
	if (M_IGEO(mp)->blocks_per_cluster == 1) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
		imap->im_len = XFS_FSB_TO_BB(mp, 1);
		imap->im_boffset = (unsigned short)(offset <<
							mp->m_sb.sb_inodelog);
		return 0;
	}

	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location. Otherwise we have to do a btree
	 * lookup to find the location.
	 */
	if (M_IGEO(mp)->inoalign_mask) {
		offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
	}

out_map:
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
		 M_IGEO(mp)->blocks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return an error rather than calling
	 * read_buf and panicking when we get an error from the driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_alert(mp,
	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
			__func__, (unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return -EINVAL;
	}
	return 0;
}
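/*
 * Worked example of the cluster arithmetic above (numbers hypothetical):
 * with blocks_per_cluster == 4 and sb_inopblock == 32, an inode at
 * agbno == chunk_agbno + 6 gives offset_agbno == 6, so
 * cluster_agbno == chunk_agbno + (6 / 4) * 4 == chunk_agbno + 4, and
 * offset == (6 - 4) * 32 + XFS_INO_TO_OFFSET(mp, ino), i.e. the inode's
 * index within the 4-block cluster buffer that im_blkno/im_len describe.
 */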
/*
 * Log specified fields for the ag hdr (inode section). The growth of the agi
 * structure over time requires that we interpret the buffer as two logical
 * regions delineated by the end of the unlinked list. This is due to the size
 * of the hash table and its location in the middle of the agi.
 *
 * For example, a request to log a field before agi_unlinked and a field after
 * agi_unlinked could cause us to log the entire hash table and use an excessive
 * amount of log space. To avoid this behavior, log the region up through
 * agi_unlinked in one call and the region after agi_unlinked through the end of
 * the structure in another.
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	struct xfs_buf	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
					/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		offsetof(xfs_agi_t, agi_free_root),
		offsetof(xfs_agi_t, agi_free_level),
		offsetof(xfs_agi_t, agi_iblocks),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	struct xfs_agi		*agi = bp->b_addr;

	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif

	/*
	 * Compute byte offsets for the first and last fields in the first
	 * region and log the agi buffer. This only logs up through
	 * agi_unlinked.
	 */
	if (fields & XFS_AGI_ALL_BITS_R1) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}

	/*
	 * Mask off the bits in the first region and calculate the first and
	 * last field offsets for any bits in the second region.
	 */
	fields &= ~XFS_AGI_ALL_BITS_R1;
	if (fields) {
		xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
				  &first, &last);
		xfs_trans_log_buf(tp, bp, first, last);
	}
}
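/*
 * Example of the two-region behaviour (bit names real, scenario
 * hypothetical): logging XFS_AGI_ROOT | XFS_AGI_FREE_ROOT issues two
 * xfs_trans_log_buf() calls, one covering agi_root in region 1 and one
 * covering agi_free_root in region 2, instead of a single byte range that
 * would drag the entire 64-bucket agi_unlinked hash table into the log.
 */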
static xfs_failaddr_t
xfs_agi_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agi	*agi = bp->b_addr;
	int		i;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
			return __this_address;
	}

	/*
	 * Validate the magic number of the agi block.
	 */
	if (!xfs_verify_magic(bp, agi->agi_magicnum))
		return __this_address;
	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
		return __this_address;

	if (be32_to_cpu(agi->agi_level) < 1 ||
	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
		return __this_address;

	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    (be32_to_cpu(agi->agi_free_level) < 1 ||
	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
		return __this_address;

	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
			continue;
		if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
			return __this_address;
	}

	return NULL;
}
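/*
 * Note on the unlinked-bucket loop above: NULLAGINO marks an empty bucket
 * head, so only non-sentinel entries are fed to xfs_verify_ino().  A
 * hypothetical AGI whose agi_unlinked[3] points past the last valid inode
 * number for the filesystem would fail verification at that entry.
 */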
static void
xfs_agi_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agi_verify(bp);
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}
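/*
 * The ordering above is deliberate: a bad CRC is reported as -EFSBADCRC
 * without running the structural checks, since the contents of a block
 * that fails its checksum cannot be trusted; only a checksum-clean buffer
 * proceeds to xfs_agi_verify(), with XFS_ERRTAG_IALLOC_READ_AGI available
 * for error injection in testing.
 */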
static void
xfs_agi_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_agi		*agi = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_agi_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
	xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
}
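/*
 * Write-side ordering matters: the structure is verified first, the LSN is
 * stamped from the attached buf log item (if any), and only then is the
 * CRC recomputed, so the checksum always covers the final on-disk
 * contents including agi_lsn.
 */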
const struct xfs_buf_ops xfs_agi_buf_ops = {
	.name = "xfs_agi",
	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
	.verify_read = xfs_agi_read_verify,
	.verify_write = xfs_agi_write_verify,
	.verify_struct = xfs_agi_verify,
};
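/*
 * Sketch of how these ops are consumed (see xfs_read_agi() below): passing
 * &xfs_agi_buf_ops to xfs_trans_read_buf() makes the buffer cache run
 * verify_read on every AGI read and verify_write before each writeback.
 * Both magic slots carry the same value because the AGI uses one magic
 * number regardless of superblock version.
 */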
/*
 * Read in the allocation group header (inode allocation section)
 */
int
xfs_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	int			error;

	trace_xfs_read_agi(mp, agno);

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
	if (error)
		return error;
	if (tp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);

	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
	return 0;
}
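/*
 * Note: XFS_AGI_REF raises the buffer's LRU reference so a hot AGI header
 * ages out of the cache more slowly than ordinary metadata, and the AGI
 * buffer type is only stamped when a transaction is supplied, which is
 * why the call above is guarded by if (tp).
 */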
int
xfs_ialloc_read_agi(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	struct xfs_buf		**bpp)	/* allocation group hdr buf */
{
	struct xfs_agi		*agi;	/* allocation group header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	trace_xfs_ialloc_read_agi(mp, agno);

	error = xfs_read_agi(mp, tp, agno, bpp);
	if (error)
		return error;

	agi = (*bpp)->b_addr;
	pag = (*bpp)->b_pag;
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	}

	/*
	 * It's possible for these to be out of sync if
	 * we are in the middle of a forced shutdown.
	 */
	ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
		XFS_FORCED_SHUTDOWN(mp));
	return 0;
}
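/*
 * Minimal usage sketch (locals hypothetical): a caller that only needs the
 * per-ag counters primed can read and immediately release the AGI:
 *
 *	struct xfs_buf	*agbp;
 *	int		error;
 *
 *	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
 *	if (!error)
 *		xfs_trans_brelse(tp, agbp);
 *
 * which is essentially what xfs_ialloc_pagi_init() below does.
 */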
/*
 * Read in the agi to initialise the per-ag data in the mount structure
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_agnumber_t	agno)	/* allocation group number */
{
	struct xfs_buf	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}
/* Is there an inode record covering a given range of inode numbers? */
int
xfs_ialloc_has_inode_record(
	struct xfs_btree_cur	*cur,
	xfs_agino_t		low,
	xfs_agino_t		high,
	bool			*exists)
{
	struct xfs_inobt_rec_incore	irec;
	xfs_agino_t		agino;
	uint16_t		holemask;
	int			has_record;
	int			i;
	int			error;

	*exists = false;
	error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
	while (error == 0 && has_record) {
		error = xfs_inobt_get_rec(cur, &irec, &has_record);
		if (error || irec.ir_startino > high)
			break;

		agino = irec.ir_startino;
		holemask = irec.ir_holemask;
		for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
				i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
			if (holemask & 1)
				continue;
			if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
					agino <= high) {
				*exists = true;
				return 0;
			}
		}

		error = xfs_btree_increment(cur, 0, &has_record);
	}
	return error;
}
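/*
 * Worked example of the holemask walk (values hypothetical): each holemask
 * bit covers XFS_INODES_PER_HOLEMASK_BIT == 4 inodes of the 64-inode
 * record, and a set bit means those inodes are a sparse hole.  For a
 * record at ir_startino 128 with holemask 0x0003, the first 8 inodes
 * (128..135) are holes and get skipped; the first clear bit whose 4-inode
 * span overlaps [low, high] reports *exists = true.
 */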
/* Is there an inode record covering a given extent? */
int
xfs_ialloc_has_inodes_at_extent(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*exists)
{
	xfs_agino_t		low;
	xfs_agino_t		high;

	low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
	high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;

	return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
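/*
 * Conversion example (hypothetical numbers): XFS_AGB_TO_AGINO() shifts by
 * sb_inopblog, so with 8 inodes per block an extent [bno 10, len 2) maps
 * to low = 80 and high = 96 - 1 = 95, i.e. every inode number that could
 * live in those two blocks.
 */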
struct xfs_ialloc_count_inodes {
	xfs_agino_t			count;
	xfs_agino_t			freecount;
};

/* Record inode counts across all inobt records. */
STATIC int
xfs_ialloc_count_inodes_rec(
	struct xfs_btree_cur		*cur,
	union xfs_btree_rec		*rec,
	void				*priv)
{
	struct xfs_inobt_rec_incore	irec;
	struct xfs_ialloc_count_inodes	*ci = priv;

	xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
	ci->count += irec.ir_count;
	ci->freecount += irec.ir_freecount;

	return 0;
}
/* Count allocated and free inodes under an inobt. */
int
xfs_ialloc_count_inodes(
	struct xfs_btree_cur		*cur,
	xfs_agino_t			*count,
	xfs_agino_t			*freecount)
{
	struct xfs_ialloc_count_inodes	ci = {0};
	int				error;

	ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
	error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
	if (error)
		return error;

	*count = ci.count;
	*freecount = ci.freecount;
	return 0;
}
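/*
 * The pair of functions above is the generic btree iteration idiom: an
 * xfs_btree_query_all() walk invokes xfs_ialloc_count_inodes_rec() once
 * per inobt record, with the private pointer carrying the accumulator.
 * A hypothetical caller looks like:
 *
 *	xfs_agino_t	count, freecount;
 *
 *	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
 *
 * after which count - freecount is the number of allocated inodes in the AG.
 */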
/*
 * Initialize inode-related geometry information.
 *
 * Compute the inode btree min and max levels and set maxicount.
 *
 * Set the inode cluster size.  This may still be overridden by the file
 * system block size if it is larger than the chosen cluster size.
 *
 * For v5 filesystems, scale the cluster size with the inode size to keep a
 * constant ratio of inode per cluster buffer, but only if mkfs has set the
 * inode alignment value appropriately for larger cluster sizes.
 *
 * Then compute the inode cluster alignment information.
 */
void
xfs_ialloc_setup_geometry(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	uint64_t		icount;
	uint			inodes;

	igeo->new_diflags2 = 0;
	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;

	/* Compute inode btree geometry. */
	igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
	igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;

	igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
			sbp->sb_inopblock);
	igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;

	if (sbp->sb_spino_align)
		igeo->ialloc_min_blks = sbp->sb_spino_align;
	else
		igeo->ialloc_min_blks = igeo->ialloc_blks;

	/* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
	inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
	igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
			inodes);

	/*
	 * Set the maximum inode count for this filesystem, being careful not
	 * to use obviously garbage sb_inopblog/sb_inopblock values.  Regular
	 * users should never get here due to failing sb verification, but
	 * certain users (xfs_db) need to be usable even with corrupt metadata.
	 */
	if (sbp->sb_imax_pct && igeo->ialloc_blks) {
		/*
		 * Make sure the maximum inode count is a multiple
		 * of the units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, igeo->ialloc_blks);
		igeo->maxicount = XFS_FSB_TO_INO(mp,
				icount * igeo->ialloc_blks);
	} else {
		igeo->maxicount = 0;
	}

	/*
	 * Compute the desired size of an inode cluster buffer size, which
	 * starts at 8K and (on v5 filesystems) scales up with larger inode
	 * sizes.
	 *
	 * Preserve the desired inode cluster size because the sparse inodes
	 * feature uses that desired size (not the actual size) to compute the
	 * sparse inode alignment.  The mount code validates this value, so we
	 * cannot change the behavior.
	 */
	igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		int	new_size = igeo->inode_cluster_size_raw;

		new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
			igeo->inode_cluster_size_raw = new_size;
	}

	/* Calculate inode cluster ratios. */
	if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
		igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
				igeo->inode_cluster_size_raw);
	else
		igeo->blocks_per_cluster = 1;
	igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
	igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);

	/* Calculate inode cluster alignment. */
	if (xfs_sb_version_hasalign(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
		igeo->cluster_align = mp->m_sb.sb_inoalignmt;
	else
		igeo->cluster_align = 1;
	igeo->inoalign_mask = igeo->cluster_align - 1;
	igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);

	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && igeo->inoalign_mask &&
	    !(mp->m_dalign & igeo->inoalign_mask))
		igeo->ialloc_align = mp->m_dalign;
	else
		igeo->ialloc_align = 0;
}
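/*
 * Worked example (an illustrative geometry, not a recommendation): with
 * 4096-byte blocks and 512-byte inodes, sb_inopblock == 8 and
 * sb_inopblog == 3, so ialloc_inos = max(64, 8) = 64 and
 * ialloc_blks = 64 >> 3 = 8 blocks per chunk.  On a v5 filesystem the 8K
 * base cluster scales by 512 / 256 to 16K; if sb_inoalignmt permits,
 * blocks_per_cluster = 16384 / 4096 = 4 and inodes_per_cluster = 32.
 */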
/* Compute the location of the root directory inode that is laid out by mkfs. */
xfs_ino_t
xfs_ialloc_calc_rootino(
	struct xfs_mount	*mp,
	int			sunit)
{
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agblock_t		first_bno;

	/*
	 * Pre-calculate the geometry of AG 0.  We know what it looks like
	 * because libxfs knows how to create allocation groups now.
	 *
	 * first_bno is the first block in which mkfs could possibly have
	 * allocated the root directory inode, once we factor in the metadata
	 * that mkfs formats before it.  Namely, the four AG headers...
	 */
	first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);

	/* ...the two free space btree roots... */
	first_bno += 2;

	/* ...the inode btree root... */
	first_bno++;

	/* ...the initial AGFL... */
	first_bno += xfs_alloc_min_freelist(mp, NULL);

	/* ...the free inode btree root... */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		first_bno++;

	/* ...the reverse mapping btree root... */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		first_bno++;

	/* ...the reference count btree... */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		first_bno++;

	/*
	 * ...and the log, if it is allocated in the first allocation group.
	 *
	 * This can happen with filesystems that only have a single
	 * allocation group, or very odd geometries created by old mkfs
	 * versions on very small filesystems.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
		first_bno += mp->m_sb.sb_logblocks;

	/*
	 * Now round first_bno up to whatever allocation alignment is given
	 * by the filesystem or was passed in.
	 */
	if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
		first_bno = roundup(first_bno, sunit);
	else if (xfs_sb_version_hasalign(&mp->m_sb) &&
			mp->m_sb.sb_inoalignmt > 1)
		first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);

	return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
}
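/*
 * Worked example (all values hypothetical): with 512-byte sectors and
 * 4096-byte blocks the four AG headers fit in one block, so first_bno
 * starts at 1; two free space btree roots and the inobt root bring it to
 * 4.  If xfs_alloc_min_freelist() returns 4 and no finobt/rmapbt/reflink
 * or AG 0 log is present, first_bno is 8 before alignment, and the root
 * inode is XFS_AGB_TO_AGINO(mp, 8) == 8 << sb_inopblog in AG 0.
 */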