fs/xfs/xfs_ialloc.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_alloc.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_error.h"
42 #include "xfs_bmap.h"

/*
 * Log specified fields for the inode given by bp and off.
 */
STATIC void
xfs_ialloc_log_di(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* inode buffer */
	int		off,		/* index of inode in buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			ioffset;	/* off in bytes */
	int			last;		/* last byte number */
	xfs_mount_t		*mp;		/* mount point structure */
	static const short	offsets[] = {	/* field offsets */
						/* keep in sync with bits */
		offsetof(xfs_dinode_core_t, di_magic),
		offsetof(xfs_dinode_core_t, di_mode),
		offsetof(xfs_dinode_core_t, di_version),
		offsetof(xfs_dinode_core_t, di_format),
		offsetof(xfs_dinode_core_t, di_onlink),
		offsetof(xfs_dinode_core_t, di_uid),
		offsetof(xfs_dinode_core_t, di_gid),
		offsetof(xfs_dinode_core_t, di_nlink),
		offsetof(xfs_dinode_core_t, di_projid),
		offsetof(xfs_dinode_core_t, di_pad),
		offsetof(xfs_dinode_core_t, di_atime),
		offsetof(xfs_dinode_core_t, di_mtime),
		offsetof(xfs_dinode_core_t, di_ctime),
		offsetof(xfs_dinode_core_t, di_size),
		offsetof(xfs_dinode_core_t, di_nblocks),
		offsetof(xfs_dinode_core_t, di_extsize),
		offsetof(xfs_dinode_core_t, di_nextents),
		offsetof(xfs_dinode_core_t, di_anextents),
		offsetof(xfs_dinode_core_t, di_forkoff),
		offsetof(xfs_dinode_core_t, di_aformat),
		offsetof(xfs_dinode_core_t, di_dmevmask),
		offsetof(xfs_dinode_core_t, di_dmstate),
		offsetof(xfs_dinode_core_t, di_flags),
		offsetof(xfs_dinode_core_t, di_gen),
		offsetof(xfs_dinode_t, di_next_unlinked),
		offsetof(xfs_dinode_t, di_u),
		offsetof(xfs_dinode_t, di_a),
		sizeof(xfs_dinode_t)
	};

	ASSERT(offsetof(xfs_dinode_t, di_core) == 0);
	ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0);
	mp = tp->t_mountp;
	/*
	 * Get the inode-relative first and last bytes for these fields
	 */
	xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last);
	/*
	 * Convert to buffer offsets and log it.
	 */
	ioffset = off << mp->m_sb.sb_inodelog;
	first += ioffset;
	last += ioffset;
	xfs_trans_log_buf(tp, bp, first, last);
}
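
/*
 * For reference, the bitmask-to-byte-range mapping used above works
 * roughly like this (a sketch only; the real work is done by
 * xfs_btree_offsets(), and the offsets[] table carries one extra
 * entry, the structure size, so that offsets[i + 1] bounds field i):
 *
 *	first = last = -1;
 *	for (i = 0; i < nbits; i++) {
 *		if (fields & (1 << i)) {
 *			if (first == -1)
 *				first = offsets[i];
 *			last = offsets[i + 1] - 1;
 *		}
 *	}
 */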

/*
 * Allocation group level functions.
 */

/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int				/* error code or 0 */
xfs_ialloc_ag_alloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*agbp,		/* alloc group buffer */
	int		*alloc)
{
	xfs_agi_t	*agi;		/* allocation group header */
	xfs_alloc_arg_t	args;		/* allocation argument structure */
	int		blks_per_cluster;  /* fs blocks per inode cluster */
	xfs_btree_cur_t	*cur;		/* inode btree cursor */
	xfs_daddr_t	d;		/* disk addr of buffer */
	xfs_agnumber_t	agno;
	int		error;
	xfs_buf_t	*fbuf;		/* new free inodes' buffer */
	xfs_dinode_t	*free;		/* new free inode structure */
	int		i;		/* inode counter */
	int		j;		/* block counter */
	int		nbufs;		/* num bufs of new inodes */
	xfs_agino_t	newino;		/* new first inode's number */
	xfs_agino_t	newlen;		/* new number of inodes */
	int		ninodes;	/* num inodes per buf */
	xfs_agino_t	thisino;	/* current inode number, for loop */
	int		version;	/* inode version number to use */
	int		isaligned = 0;	/* inode allocation at stripe unit */
					/* boundary */

	args.tp = tp;
	args.mp = tp->t_mountp;

	/*
	 * Locking will ensure that we don't have two callers in here
	 * at one time.
	 */
	newlen = XFS_IALLOC_INODES(args.mp);
	if (args.mp->m_maxicount &&
	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
		return XFS_ERROR(ENOSPC);
	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
	/*
	 * First try to allocate inodes contiguous with the last-allocated
	 * chunk of inodes.  If the filesystem is striped, this will fill
	 * an entire stripe unit with inodes.
	 */
	agi = XFS_BUF_TO_AGI(agbp);
	newino = be32_to_cpu(agi->agi_newino);
	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
			XFS_IALLOC_BLOCKS(args.mp);
	if (likely(newino != NULLAGINO &&
		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		args.type = XFS_ALLOCTYPE_THIS_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		args.alignment = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	} else
		args.fsbno = NULLFSBLOCK;

	if (unlikely(args.fsbno == NULLFSBLOCK)) {
		/*
		 * Set the alignment for the allocation.
		 * If stripe alignment is turned on then align at stripe unit
		 * boundary.
		 * If the cluster size is smaller than a filesystem block
		 * then we're doing I/O for inodes in filesystem block size
		 * pieces, so don't need alignment anyway.
		 */
		isaligned = 0;
		if (args.mp->m_sinoalign) {
			ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
			args.alignment = args.mp->m_dalign;
			isaligned = 1;
		} else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
			   args.mp->m_sb.sb_inoalignmt >=
			   XFS_B_TO_FSBT(args.mp,
					XFS_INODE_CLUSTER_SIZE(args.mp)))
			args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		/*
		 * Need to figure out where to allocate the inode blocks.
		 * Ideally they should be spaced out through the a.g.
		 * For now, just allocate blocks up front.
		 */
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		/*
		 * Allocate a fixed-size extent of inodes.
		 */
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.mod = args.total = args.wasdel = args.isfl =
			args.userdata = args.minalignslop = 0;
		args.prod = 1;
		/*
		 * Allow space for the inode btree to split.
		 */
		args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	/*
	 * If stripe alignment is turned on, then try again with cluster
	 * alignment.
	 */
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.agbno = be32_to_cpu(agi->agi_root);
		args.fsbno = XFS_AGB_TO_FSB(args.mp,
				be32_to_cpu(agi->agi_seqno), args.agbno);
		if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
		    args.mp->m_sb.sb_inoalignmt >=
		    XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
			args.alignment = args.mp->m_sb.sb_inoalignmt;
		else
			args.alignment = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}

	if (args.fsbno == NULLFSBLOCK) {
		*alloc = 0;
		return 0;
	}
	ASSERT(args.len == args.minlen);
	/*
	 * Convert the results.
	 */
	newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
	/*
	 * Loop over the new block(s), filling in the inodes.
	 * For small block sizes, manipulate the inodes in buffers
	 * which are multiples of the block size.
	 */
	if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) {
		blks_per_cluster = 1;
		nbufs = (int)args.len;
		ninodes = args.mp->m_sb.sb_inopblock;
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) /
				   args.mp->m_sb.sb_blocksize;
		nbufs = (int)args.len / blks_per_cluster;
		ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock;
	}
	/*
	 * Figure out what version number to use in the inodes we create.
	 * If the superblock version has caught up to the one that supports
	 * the new inode format, then use the new inode version.  Otherwise
	 * use the old version so that old kernels will continue to be
	 * able to use the file system.
	 */
	if (XFS_SB_VERSION_HASNLINK(&args.mp->m_sb))
		version = XFS_DINODE_VERSION_2;
	else
		version = XFS_DINODE_VERSION_1;

	for (j = 0; j < nbufs; j++) {
		/*
		 * Get the block.
		 */
		d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno),
				     args.agbno + (j * blks_per_cluster));
		fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d,
					 args.mp->m_bsize * blks_per_cluster,
					 XFS_BUF_LOCK);
		ASSERT(fbuf);
		ASSERT(!XFS_BUF_GETERROR(fbuf));
		/*
		 * Set initial values for the inodes in this buffer.
		 */
		xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog);
		for (i = 0; i < ninodes; i++) {
			free = XFS_MAKE_IPTR(args.mp, fbuf, i);
			INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC);
			INT_SET(free->di_core.di_version, ARCH_CONVERT, version);
			INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
			xfs_ialloc_log_di(tp, fbuf, i,
				XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
		}
		xfs_trans_inode_alloc_buf(tp, fbuf);
	}
	be32_add(&agi->agi_count, newlen);
	be32_add(&agi->agi_freecount, newlen);
	agno = be32_to_cpu(agi->agi_seqno);
	down_read(&args.mp->m_peraglock);
	args.mp->m_perag[agno].pagi_freecount += newlen;
	up_read(&args.mp->m_peraglock);
	agi->agi_newino = cpu_to_be32(newino);
	/*
	 * Insert records describing the new inode chunk into the btree.
	 */
	cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno,
			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	for (thisino = newino;
	     thisino < newino + newlen;
	     thisino += XFS_INODES_PER_CHUNK) {
		if ((error = xfs_inobt_lookup_eq(cur, thisino,
				XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 0);
		if ((error = xfs_inobt_insert(cur, &i))) {
			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
			return error;
		}
		ASSERT(i == 1);
	}
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	/*
	 * Log allocation group header fields
	 */
	xfs_ialloc_log_agi(tp, agbp,
		XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
	/*
	 * Modify/log superblock values for inode count and inode free count.
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
	*alloc = 1;
	return 0;
}
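
/*
 * Worked example of the cluster arithmetic above, assuming 512 byte
 * filesystem blocks, 256 byte inodes and a 4096 byte inode cluster:
 * blks_per_cluster = 4096 / 512 = 8, so an allocation of, say,
 * args.len = 32 blocks is initialised through nbufs = 32 / 8 = 4
 * buffers of ninodes = 8 * (512 / 256) = 16 inodes each.  When the
 * block size is at least the cluster size the first branch is taken
 * instead: one buffer per block, sb_inopblock inodes apiece.
 */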

STATIC_INLINE xfs_agnumber_t
xfs_ialloc_next_ag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;

	spin_lock(&mp->m_agirotor_lock);
	agno = mp->m_agirotor;
	if (++mp->m_agirotor == mp->m_maxagi)
		mp->m_agirotor = 0;
	spin_unlock(&mp->m_agirotor_lock);

	return agno;
}

/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and the mode.  Return the allocation group buffer.
 */
STATIC xfs_buf_t *			/* allocation group buffer */
xfs_ialloc_ag_select(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent directory inode number */
	mode_t		mode,		/* bits set to indicate file type */
	int		okalloc)	/* ok to allocate more space */
{
	xfs_buf_t	*agbp;		/* allocation group header buffer */
	xfs_agnumber_t	agcount;	/* number of ag's in the filesystem */
	xfs_agnumber_t	agno;		/* current ag number */
	int		flags;		/* alloc buffer locking flags */
	xfs_extlen_t	ineed;		/* blocks needed for inode allocation */
	xfs_extlen_t	longest = 0;	/* longest extent available */
	xfs_mount_t	*mp;		/* mount point structure */
	int		needspace;	/* file mode implies space allocated */
	xfs_perag_t	*pag;		/* per allocation group data */
	xfs_agnumber_t	pagno;		/* parent (starting) ag number */

	/*
	 * Files of these types need at least one block if length > 0
	 * (and they won't fit in the inode, but that's hard to figure out).
	 */
	needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
	mp = tp->t_mountp;
	agcount = mp->m_maxagi;
	if (S_ISDIR(mode))
		pagno = xfs_ialloc_next_ag(mp);
	else {
		pagno = XFS_INO_TO_AGNO(mp, parent);
		if (pagno >= agcount)
			pagno = 0;
	}
	ASSERT(pagno < agcount);

	/*
	 * Loop through allocation groups, looking for one with a little
	 * free space in it.  Note we don't look for free inodes, exactly.
	 * Instead, when no inodes are currently free, we require enough
	 * free blocks to allocate a new chunk of inodes in that group.
	 */
	agno = pagno;
	flags = XFS_ALLOC_FLAG_TRYLOCK;
	down_read(&mp->m_peraglock);
	for (;;) {
		pag = &mp->m_perag[agno];
		if (!pag->pagi_init) {
			if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
		} else
			agbp = NULL;

		if (!pag->pagi_inodeok) {
			xfs_ialloc_next_ag(mp);
			goto unlock_nextag;
		}

		/*
		 * Is there enough free space for the file plus a block
		 * of inodes (if we need to allocate some)?
		 */
		ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
		if (ineed && !pag->pagf_init) {
			if (agbp == NULL &&
			    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
				agbp = NULL;
				goto nextag;
			}
			(void)xfs_alloc_pagf_init(mp, tp, agno, flags);
		}
		if (!ineed || pag->pagf_init) {
			if (ineed && !(longest = pag->pagf_longest))
				longest = pag->pagf_flcount > 0;
			if (!ineed ||
			    (pag->pagf_freeblks >= needspace + ineed &&
			     longest >= ineed &&
			     okalloc)) {
				if (agbp == NULL &&
				    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
					agbp = NULL;
					goto nextag;
				}
				up_read(&mp->m_peraglock);
				return agbp;
			}
		}
unlock_nextag:
		if (agbp)
			xfs_trans_brelse(tp, agbp);
nextag:
		/*
		 * No point in iterating over the rest, if we're shutting
		 * down.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			up_read(&mp->m_peraglock);
			return NULL;
		}
		agno++;
		if (agno >= agcount)
			agno = 0;
		if (agno == pagno) {
			if (flags == 0) {
				up_read(&mp->m_peraglock);
				return NULL;
			}
			flags = 0;
		}
	}
}

/*
 * Visible inode allocation functions.
 */

/*
 * Allocate an inode on disk.
 * Mode is used to tell whether the new inode will need space, and whether
 * it is a directory.
 *
 * The arguments IO_agbp and alloc_done are defined to work within
 * the constraint of one allocation per transaction.
 * xfs_dialloc() is designed to be called twice if it has to do an
 * allocation to make more free inodes.  On the first call,
 * IO_agbp should be set to NULL.  If an inode is available,
 * i.e., xfs_dialloc() did not need to do an allocation, an inode
 * number is returned.  In this case, IO_agbp would be set to the
 * current ag_buf and alloc_done set to false.
 * If an allocation needed to be done, xfs_dialloc would return
 * the current ag_buf in IO_agbp and set alloc_done to true.
 * The caller should then commit the current transaction, allocate a new
 * transaction, and call xfs_dialloc() again, passing in the previous
 * value of IO_agbp.  IO_agbp should be held across the transactions.
 * Since the agbp is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the
 * on-disk data structures are updated.  The inode itself is not read
 * in, since doing so would break ordering constraints with xfs_reclaim.
 */
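/*
 * A rough sketch of the calling convention described above; the
 * transaction commit and re-allocation between the two calls is
 * elided, and the variable names are illustrative only:
 *
 *	xfs_buf_t	*ialloc_context = NULL;
 *	boolean_t	call_again = B_FALSE;
 *	xfs_ino_t	ino;
 *
 *	error = xfs_dialloc(tp, parent_ino, mode, okalloc,
 *			    &ialloc_context, &call_again, &ino);
 *	if (!error && call_again) {
 *		// commit tp and start a new transaction here, keeping
 *		// hold of the agbp returned in ialloc_context
 *		error = xfs_dialloc(tp, parent_ino, mode, okalloc,
 *				    &ialloc_context, &call_again, &ino);
 *	}
 */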
int
xfs_dialloc(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	parent,		/* parent inode (directory) */
	mode_t		mode,		/* mode bits for new inode */
	int		okalloc,	/* ok to allocate more space */
	xfs_buf_t	**IO_agbp,	/* in/out ag header's buffer */
	boolean_t	*alloc_done,	/* true if we needed to replenish
					   inode freelist */
	xfs_ino_t	*inop)		/* inode number allocated */
{
	xfs_agnumber_t	agcount;	/* number of allocation groups */
	xfs_buf_t	*agbp;		/* allocation group header's buffer */
	xfs_agnumber_t	agno;		/* allocation group number */
	xfs_agi_t	*agi;		/* allocation group header structure */
	xfs_btree_cur_t	*cur;		/* inode allocation btree cursor */
	int		error;		/* error return value */
	int		i;		/* result code */
	int		ialloced;	/* inode allocation status */
	int		noroom = 0;	/* no space for inode blk allocation */
	xfs_ino_t	ino;		/* fs-relative inode to be returned */
	/* REFERENCED */
	int		j;		/* result code */
	xfs_mount_t	*mp;		/* file system mount structure */
	int		offset;		/* index of inode in chunk */
	xfs_agino_t	pagino;		/* parent's a.g. relative inode # */
	xfs_agnumber_t	pagno;		/* parent's allocation group number */
	xfs_inobt_rec_incore_t rec;	/* inode allocation record */
	xfs_agnumber_t	tagno;		/* testing allocation group number */
	xfs_btree_cur_t	*tcur;		/* temp cursor */
	xfs_inobt_rec_incore_t trec;	/* temp inode allocation record */

	if (*IO_agbp == NULL) {
		/*
		 * We do not have an agbp, so select an initial allocation
		 * group for inode allocation.
		 */
		agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
		/*
		 * Couldn't find an allocation group satisfying the
		 * criteria, give up.
		 */
		if (!agbp) {
			*inop = NULLFSINO;
			return 0;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	} else {
		/*
		 * Continue where we left off before.  In this case, we
		 * know that the allocation group has free inodes.
		 */
		agbp = *IO_agbp;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
		ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
	}
	mp = tp->t_mountp;
	agcount = mp->m_sb.sb_agcount;
	agno = be32_to_cpu(agi->agi_seqno);
	tagno = agno;
	pagno = XFS_INO_TO_AGNO(mp, parent);
	pagino = XFS_INO_TO_AGINO(mp, parent);

	/*
	 * If we have already hit the ceiling of inode blocks then clear
	 * okalloc so we scan all available agi structures for a free
	 * inode.
	 */
	if (mp->m_maxicount &&
	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
		noroom = 1;
		okalloc = 0;
	}

	/*
	 * Loop until we find an allocation group that either has free inodes
	 * or in which we can allocate some inodes.  Iterate through the
	 * allocation groups upward, wrapping at the end.
	 */
	*alloc_done = B_FALSE;
	while (!agi->agi_freecount) {
		/*
		 * Don't do anything if we're not supposed to allocate
		 * any blocks, just go on to the next ag.
		 */
		if (okalloc) {
			/*
			 * Try to allocate some new inodes in the allocation
			 * group.
			 */
			if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
				xfs_trans_brelse(tp, agbp);
				if (error == ENOSPC) {
					*inop = NULLFSINO;
					return 0;
				} else
					return error;
			}
			if (ialloced) {
				/*
				 * We successfully allocated some inodes, return
				 * the current context to the caller so that it
				 * can commit the current transaction and call
				 * us again where we left off.
				 */
				ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
				*alloc_done = B_TRUE;
				*IO_agbp = agbp;
				*inop = NULLFSINO;
				return 0;
			}
		}
		/*
		 * If it failed, give up on this ag.
		 */
		xfs_trans_brelse(tp, agbp);
		/*
		 * Go on to the next ag: get its ag header.
		 */
nextag:
		if (++tagno == agcount)
			tagno = 0;
		if (tagno == agno) {
			*inop = NULLFSINO;
			return noroom ? ENOSPC : 0;
		}
		down_read(&mp->m_peraglock);
		if (mp->m_perag[tagno].pagi_inodeok == 0) {
			up_read(&mp->m_peraglock);
			goto nextag;
		}
		error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
		up_read(&mp->m_peraglock);
		if (error)
			goto nextag;
		agi = XFS_BUF_TO_AGI(agbp);
		ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	}
	/*
	 * Here with an allocation group that has a free inode.
	 * Reset agno since we may have chosen a new ag in the
	 * loop above.
	 */
	agno = tagno;
	*IO_agbp = NULL;
	cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno),
				    XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
	/*
	 * If pagino is 0 (this is the root inode allocation) use newino.
	 * This must work because we've just allocated some.
	 */
	if (!pagino)
		pagino = be32_to_cpu(agi->agi_newino);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		do {
			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
					&rec.ir_freecount, &rec.ir_free, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);

		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	/*
	 * If in the same a.g. as the parent, try to get near the parent.
	 */
	if (pagno == agno) {
		if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i)))
			goto error0;
		if (i != 0 &&
		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
			    &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * Found a free inode in the same chunk
			 * as parent, done.
			 */
		}
		/*
		 * In the same a.g. as parent, but parent's chunk is full.
		 */
		else {
			int	doneleft;	/* done, to the left */
			int	doneright;	/* done, to the right */

			if (error)
				goto error0;
			ASSERT(i == 1);
			ASSERT(j == 1);
			/*
			 * Duplicate the cursor, search left & right
			 * simultaneously.
			 */
			if ((error = xfs_btree_dup_cursor(cur, &tcur)))
				goto error0;
			/*
			 * Search left with tcur, back up 1 record.
			 */
			if ((error = xfs_inobt_decrement(tcur, 0, &i)))
				goto error1;
			doneleft = !i;
			if (!doneleft) {
				if ((error = xfs_inobt_get_rec(tcur,
						&trec.ir_startino,
						&trec.ir_freecount,
						&trec.ir_free, &i)))
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Search right with cur, go forward 1 record.
			 */
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error1;
			doneright = !i;
			if (!doneright) {
				if ((error = xfs_inobt_get_rec(cur,
						&rec.ir_startino,
						&rec.ir_freecount,
						&rec.ir_free, &i)))
					goto error1;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error1);
			}
			/*
			 * Loop until we find the closest inode chunk
			 * with a free one.
			 */
			while (!doneleft || !doneright) {
				int	useleft;  /* using left inode
						     chunk this time */

				/*
				 * Figure out which block is closer,
				 * if both are valid.
				 */
				if (!doneleft && !doneright)
					useleft =
						pagino -
						(trec.ir_startino +
						 XFS_INODES_PER_CHUNK - 1) <
						 rec.ir_startino - pagino;
				else
					useleft = !doneleft;
				/*
				 * If checking the left, does it have
				 * free inodes?
				 */
				if (useleft && trec.ir_freecount) {
					/*
					 * Yes, set it up as the chunk to use.
					 */
					rec = trec;
					xfs_btree_del_cursor(cur,
						XFS_BTREE_NOERROR);
					cur = tcur;
					break;
				}
				/*
				 * If checking the right, does it have
				 * free inodes?
				 */
				if (!useleft && rec.ir_freecount) {
					/*
					 * Yes, it's already set up.
					 */
					xfs_btree_del_cursor(tcur,
						XFS_BTREE_NOERROR);
					break;
				}
				/*
				 * If used the left, get another one
				 * further left.
				 */
				if (useleft) {
					if ((error = xfs_inobt_decrement(tcur, 0,
							&i)))
						goto error1;
					doneleft = !i;
					if (!doneleft) {
						if ((error = xfs_inobt_get_rec(
							    tcur,
							    &trec.ir_startino,
							    &trec.ir_freecount,
							    &trec.ir_free, &i)))
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
				/*
				 * If used the right, get another one
				 * further right.
				 */
				else {
					if ((error = xfs_inobt_increment(cur, 0,
							&i)))
						goto error1;
					doneright = !i;
					if (!doneright) {
						if ((error = xfs_inobt_get_rec(
							    cur,
							    &rec.ir_startino,
							    &rec.ir_freecount,
							    &rec.ir_free, &i)))
							goto error1;
						XFS_WANT_CORRUPTED_GOTO(i == 1,
							error1);
					}
				}
			}
			ASSERT(!doneleft || !doneright);
		}
	}
	/*
	 * In a different a.g. from the parent.
	 * See if the most recently allocated block has any free.
	 */
	else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
		if ((error = xfs_inobt_lookup_eq(cur,
				be32_to_cpu(agi->agi_newino), 0, 0, &i)))
			goto error0;
		if (i == 1 &&
		    (error = xfs_inobt_get_rec(cur, &rec.ir_startino,
			    &rec.ir_freecount, &rec.ir_free, &j)) == 0 &&
		    j == 1 &&
		    rec.ir_freecount > 0) {
			/*
			 * The last chunk allocated in the group still has
			 * a free inode.
			 */
		}
		/*
		 * None left in the last group, search the whole a.g.
		 */
		else {
			if (error)
				goto error0;
			if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
				goto error0;
			ASSERT(i == 1);
			for (;;) {
				if ((error = xfs_inobt_get_rec(cur,
						&rec.ir_startino,
						&rec.ir_freecount, &rec.ir_free,
						&i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (rec.ir_freecount > 0)
					break;
				if ((error = xfs_inobt_increment(cur, 0, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			}
		}
	}
	offset = XFS_IALLOC_FIND_FREE(&rec.ir_free);
	ASSERT(offset >= 0);
	ASSERT(offset < XFS_INODES_PER_CHUNK);
	ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
				   XFS_INODES_PER_CHUNK) == 0);
	ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
	XFS_INOBT_CLR_FREE(&rec, offset);
	rec.ir_freecount--;
	if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount,
			rec.ir_free)))
		goto error0;
	be32_add(&agi->agi_freecount, -1);
	xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
	down_read(&mp->m_peraglock);
	mp->m_perag[tagno].pagi_freecount--;
	up_read(&mp->m_peraglock);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
					&rec.ir_freecount, &rec.ir_free, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			freecount += rec.ir_freecount;
			if ((error = xfs_inobt_increment(cur, 0, &i)))
				goto error0;
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
	*inop = ino;
	return 0;
error1:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
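
/*
 * The left/right scan in xfs_dialloc() above picks whichever
 * neighbouring chunk sits closer to the parent.  As a rough worked
 * example, with XFS_INODES_PER_CHUNK = 64 and pagino = 200: a left
 * chunk starting at inode 64 ends at 127, giving a distance of
 * 200 - 127 = 73, while a right chunk starting at 256 is
 * 256 - 200 = 56 away, so the right-hand chunk is chosen.
 */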

/*
 * Free disk inode.  Carefully avoids touching the incore inode, all
 * manipulations incore are the caller's responsibility.
 * The on-disk inode is not changed by this operation, only the
 * btree (free inode mask) is changed.
 */
int
xfs_difree(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ino_t	inode,		/* inode to be freed */
	xfs_bmap_free_t	*flist,		/* extents to free */
	int		*delete,	/* set if inode cluster was deleted */
	xfs_ino_t	*first_ino)	/* first inode in deleted cluster */
{
	/* REFERENCED */
	xfs_agblock_t	agbno;	/* block number containing inode */
	xfs_buf_t	*agbp;	/* buffer containing allocation group header */
	xfs_agino_t	agino;	/* inode number relative to allocation group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agi_t	*agi;	/* allocation group header */
	xfs_btree_cur_t	*cur;	/* inode btree cursor */
	int		error;	/* error return value */
	int		i;	/* result code */
	int		ilen;	/* inodes in an inode cluster */
	xfs_mount_t	*mp;	/* mount structure for filesystem */
	int		off;	/* offset of inode in inode chunk */
	xfs_inobt_rec_incore_t rec;	/* btree record */

	mp = tp->t_mountp;

	/*
	 * Break up inode number into its components.
	 */
	agno = XFS_INO_TO_AGNO(mp, inode);
	if (agno >= mp->m_sb.sb_agcount) {
		cmn_err(CE_WARN,
			"xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s.  Returning EINVAL.",
			agno, mp->m_sb.sb_agcount, mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agino = XFS_INO_TO_AGINO(mp, inode);
	if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
		cmn_err(CE_WARN,
			"xfs_difree: inode != XFS_AGINO_TO_INO() "
			"(%llu != %llu) on %s.  Returning EINVAL.",
			(unsigned long long)inode,
			(unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino),
			mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agbno >= mp->m_sb.sb_agblocks) {
		cmn_err(CE_WARN,
			"xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s.  Returning EINVAL.",
			agbno, mp->m_sb.sb_agblocks, mp->m_fsname);
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * Get the allocation group header.
	 */
	down_read(&mp->m_peraglock);
	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	up_read(&mp->m_peraglock);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		return error;
	}
	agi = XFS_BUF_TO_AGI(agbp);
	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
	ASSERT(agbno < be32_to_cpu(agi->agi_length));
	/*
	 * Initialize the cursor.
	 */
	cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
		(xfs_inode_t *)0, 0);
#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino,
					&rec.ir_freecount, &rec.ir_free, &i)))
				goto error0;
			if (i) {
				freecount += rec.ir_freecount;
				if ((error = xfs_inobt_increment(cur, 0, &i)))
					goto error0;
			}
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	/*
	 * Look for the entry describing this inode.
	 */
	if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_inobt_lookup_le() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount,
			&rec.ir_free, &i))) {
		cmn_err(CE_WARN,
			"xfs_difree: xfs_inobt_get_rec() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		goto error0;
	}
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	/*
	 * Get the offset in the inode chunk.
	 */
	off = agino - rec.ir_startino;
	ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
	ASSERT(!XFS_INOBT_IS_FREE(&rec, off));
	/*
	 * Mark the inode free & increment the count.
	 */
	XFS_INOBT_SET_FREE(&rec, off);
	rec.ir_freecount++;

	/*
	 * When an inode cluster is free, it becomes eligible for removal
	 */
	if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {

		*delete = 1;
		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);

		/*
		 * Remove the inode cluster from the AGI B+Tree, adjust the
		 * AGI and Superblock inode counts, and mark the disk space
		 * to be freed when the transaction is committed.
		 */
		ilen = XFS_IALLOC_INODES(mp);
		be32_add(&agi->agi_count, -ilen);
		be32_add(&agi->agi_freecount, -(ilen - 1));
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
		down_read(&mp->m_peraglock);
		mp->m_perag[agno].pagi_freecount -= ilen - 1;
		up_read(&mp->m_peraglock);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));

		if ((error = xfs_inobt_delete(cur, &i))) {
			cmn_err(CE_WARN, "xfs_difree: xfs_inobt_delete returned an error %d on %s.\n",
				error, mp->m_fsname);
			goto error0;
		}

		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
				agno, XFS_INO_TO_AGBNO(mp, rec.ir_startino)),
				XFS_IALLOC_BLOCKS(mp), flist, mp);
	} else {
		*delete = 0;

		if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) {
			cmn_err(CE_WARN,
				"xfs_difree: xfs_inobt_update() returned an error %d on %s.  Returning error.",
				error, mp->m_fsname);
			goto error0;
		}
		/*
		 * Change the inode free counts and log the ag/sb changes.
		 */
		be32_add(&agi->agi_freecount, 1);
		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
		down_read(&mp->m_peraglock);
		mp->m_perag[agno].pagi_freecount++;
		up_read(&mp->m_peraglock);
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
	}

#ifdef DEBUG
	if (cur->bc_nlevels == 1) {
		int freecount = 0;

		if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
			goto error0;
		do {
			if ((error = xfs_inobt_get_rec(cur,
					&rec.ir_startino,
					&rec.ir_freecount,
					&rec.ir_free, &i)))
				goto error0;
			if (i) {
				freecount += rec.ir_freecount;
				if ((error = xfs_inobt_increment(cur, 0, &i)))
					goto error0;
			}
		} while (i == 1);
		ASSERT(freecount == be32_to_cpu(agi->agi_freecount) ||
		       XFS_FORCED_SHUTDOWN(mp));
	}
#endif
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	return 0;

error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Return the location of the inode in bno/off, for mapping it into a buffer.
 */
/*ARGSUSED*/
int
xfs_dilocate(
	xfs_mount_t	*mp,	/* file system mount structure */
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	xfs_fsblock_t	*bno,	/* output: block containing inode */
	int		*len,	/* output: num blocks in inode cluster */
	int		*off,	/* output: index in block of inode */
	uint		flags)	/* flags concerning inode lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_buf_t	*agbp;	/* agi buffer */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	int		blks_per_cluster; /* num blocks per inode cluster */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agino_t	chunk_agino;	/* first agino in inode chunk */
	__int32_t	chunk_cnt;	/* count of free inodes in chunk */
	xfs_inofree_t	chunk_free;	/* mask of free inodes in chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	xfs_btree_cur_t	*cur;	/* inode btree cursor */
	int		error;	/* error code */
	int		i;	/* temp state */
	int		offset;	/* index of inode in its buffer */
	int		offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);
	/*
	 * Split up the inode number into its parts.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/* no diagnostics for bulkstat, ino comes from userspace */
		if (flags & XFS_IMAP_BULKSTAT)
			return XFS_ERROR(EINVAL);
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_dilocate: agno (%d) >= "
					"mp->m_sb.sb_agcount (%d)",
					agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_dilocate: agbno (0x%llx) >= "
					"mp->m_sb.sb_agblocks (0x%lx)",
					(unsigned long long) agbno,
					(unsigned long) mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_fs_cmn_err(CE_ALERT, mp,
					"xfs_dilocate: ino (0x%llx) != "
					"XFS_AGINO_TO_INO(mp, agno, agino) "
					"(0x%llx)",
					ino, XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}
	if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) ||
	    !(flags & XFS_IMAP_LOOKUP)) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);
		*bno = XFS_AGB_TO_FSB(mp, agno, agbno);
		*off = offset;
		*len = 1;
		return 0;
	}
	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
	if (*bno != NULLFSBLOCK) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);
		cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno);
		*off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
			offset;
		*len = blks_per_cluster;
		return 0;
	}
	if (mp->m_inoalign_mask) {
		offset_agbno = agbno & mp->m_inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
		up_read(&mp->m_peraglock);
		if (error) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
					"xfs_ialloc_read_agi() returned "
					"error %d, agno %d",
					error, agno);
#endif /* DEBUG */
			return error;
		}
		cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
			(xfs_inode_t *)0, 0);
		if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
					"xfs_inobt_lookup_le() failed");
#endif /* DEBUG */
			goto error0;
		}
		if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt,
				&chunk_free, &i))) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
					"xfs_inobt_get_rec() failed");
#endif /* DEBUG */
			goto error0;
		}
		if (i == 0) {
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
					"xfs_inobt_get_rec() failed");
#endif /* DEBUG */
			error = XFS_ERROR(EINVAL);
		}
		xfs_trans_brelse(tp, agbp);
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		if (error)
			return error;
		chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino);
		offset_agbno = agbno - chunk_agbno;
	}
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / blks_per_cluster) * blks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);
	*bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno);
	*off = offset;
	*len = blks_per_cluster;
	return 0;
error0:
	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
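
/*
 * For reference, the inode number decomposition used above works
 * roughly as follows: the low sb_inopblog bits of an inode number
 * are the offset within a block, the next sb_agblklog bits are the
 * block within the allocation group, and the remaining high bits are
 * the AG number.  In outline (ignoring the masking macros):
 *
 *	offset = ino & ((1 << sb_inopblog) - 1);
 *	agino  = ino & ((1 << (sb_agblklog + sb_inopblog)) - 1);
 *	agbno  = agino >> sb_inopblog;
 *	agno   = ino >> (sb_agblklog + sb_inopblog);
 */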

/*
 * Compute and fill in value of m_in_maxlevels.
 */
void
xfs_ialloc_compute_maxlevels(
	xfs_mount_t	*mp)		/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
		XFS_INODES_PER_CHUNK_LOG;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_in_maxlevels = level;
}
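
/*
 * Worked example of the computation above, assuming 32 bits of
 * AG-relative inode number, XFS_INODES_PER_CHUNK_LOG = 6 and minimum
 * record counts of, say, 250 per leaf and 125 per node: maxleafents
 * is 2^26 chunk records, needing ceil(2^26 / 250) = 268436 leaf
 * blocks; dividing by 125 gives 2148, then 18, then 1, so the loop
 * runs three times and m_in_maxlevels ends up as 4.
 */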

/*
 * Log specified fields for the ag hdr (inode section)
 */
void
xfs_ialloc_log_agi(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_buf_t	*bp,		/* allocation group header buffer */
	int		fields)		/* bitmask of fields to log */
{
	int			first;		/* first byte number */
	int			last;		/* last byte number */
	static const short	offsets[] = {	/* field starting offsets */
				/* keep in sync with bit definitions */
		offsetof(xfs_agi_t, agi_magicnum),
		offsetof(xfs_agi_t, agi_versionnum),
		offsetof(xfs_agi_t, agi_seqno),
		offsetof(xfs_agi_t, agi_length),
		offsetof(xfs_agi_t, agi_count),
		offsetof(xfs_agi_t, agi_root),
		offsetof(xfs_agi_t, agi_level),
		offsetof(xfs_agi_t, agi_freecount),
		offsetof(xfs_agi_t, agi_newino),
		offsetof(xfs_agi_t, agi_dirino),
		offsetof(xfs_agi_t, agi_unlinked),
		sizeof(xfs_agi_t)
	};
#ifdef DEBUG
	xfs_agi_t		*agi;	/* allocation group header */

	agi = XFS_BUF_TO_AGI(bp);
	ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
#endif
	/*
	 * Compute byte offsets for the first and last fields.
	 */
	xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS, &first, &last);
	/*
	 * Log the allocation group inode header buffer.
	 */
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Read in the allocation group header (inode allocation section)
 */
int
xfs_ialloc_read_agi(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* allocation group hdr buf */
{
	xfs_agi_t	*agi;		/* allocation group header */
	int		agi_ok;		/* agi is consistent */
	xfs_buf_t	*bp;		/* allocation group hdr buf */
	xfs_perag_t	*pag;		/* per allocation group data */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	ASSERT(bp && !XFS_BUF_GETERROR(bp));

	/*
	 * Validate the magic number of the agi block.
	 */
	agi = XFS_BUF_TO_AGI(bp);
	agi_ok =
		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
			XFS_RANDOM_IALLOC_READ_AGI))) {
		XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW,
				     mp, agi);
		xfs_trans_brelse(tp, bp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	pag = &mp->m_perag[agno];
	if (!pag->pagi_init) {
		pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
		pag->pagi_count = be32_to_cpu(agi->agi_count);
		pag->pagi_init = 1;
	} else {
		/*
		 * It's possible for these to be out of sync if
		 * we are in the middle of a forced shutdown.
		 */
		ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
			XFS_FORCED_SHUTDOWN(mp));
	}

#ifdef DEBUG
	{
		int	i;

		for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
			ASSERT(agi->agi_unlinked[i]);
	}
#endif

	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF);
	*bpp = bp;
	return 0;
}

/*
 * Read in the agi to initialise the per-ag data in the mount structure
 */
int
xfs_ialloc_pagi_init(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno)		/* allocation group number */
{
	xfs_buf_t	*bp = NULL;
	int		error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
	if (error)
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}