fs/xfs/xfs_alloc.c
1 /*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_btree.h"
34 #include "xfs_alloc.h"
35 #include "xfs_error.h"
36 #include "xfs_trace.h"
39 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
41 #define XFSA_FIXUP_BNO_OK 1
42 #define XFSA_FIXUP_CNT_OK 2
44 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
45 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
46 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
47 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
48 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
49 STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
50 xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
53 * Lookup the record equal to [bno, len] in the btree given by cur.
55 STATIC int /* error */
56 xfs_alloc_lookup_eq(
57 struct xfs_btree_cur *cur, /* btree cursor */
58 xfs_agblock_t bno, /* starting block of extent */
59 xfs_extlen_t len, /* length of extent */
60 int *stat) /* success/failure */
62 cur->bc_rec.a.ar_startblock = bno;
63 cur->bc_rec.a.ar_blockcount = len;
64 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
68 * Lookup the first record greater than or equal to [bno, len]
69 * in the btree given by cur.
71 STATIC int /* error */
72 xfs_alloc_lookup_ge(
73 struct xfs_btree_cur *cur, /* btree cursor */
74 xfs_agblock_t bno, /* starting block of extent */
75 xfs_extlen_t len, /* length of extent */
76 int *stat) /* success/failure */
78 cur->bc_rec.a.ar_startblock = bno;
79 cur->bc_rec.a.ar_blockcount = len;
80 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
84 * Lookup the first record less than or equal to [bno, len]
85 * in the btree given by cur.
87 int /* error */
88 xfs_alloc_lookup_le(
89 struct xfs_btree_cur *cur, /* btree cursor */
90 xfs_agblock_t bno, /* starting block of extent */
91 xfs_extlen_t len, /* length of extent */
92 int *stat) /* success/failure */
94 cur->bc_rec.a.ar_startblock = bno;
95 cur->bc_rec.a.ar_blockcount = len;
96 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
100 * Update the record referred to by cur to the value given
101 * by [bno, len].
102 * This either works (return 0) or gets an EFSCORRUPTED error.
104 STATIC int /* error */
105 xfs_alloc_update(
106 struct xfs_btree_cur *cur, /* btree cursor */
107 xfs_agblock_t bno, /* starting block of extent */
108 xfs_extlen_t len) /* length of extent */
110 union xfs_btree_rec rec;
112 rec.alloc.ar_startblock = cpu_to_be32(bno);
113 rec.alloc.ar_blockcount = cpu_to_be32(len);
114 return xfs_btree_update(cur, &rec);
118 * Get the data from the pointed-to record.
120 int /* error */
121 xfs_alloc_get_rec(
122 struct xfs_btree_cur *cur, /* btree cursor */
123 xfs_agblock_t *bno, /* output: starting block of extent */
124 xfs_extlen_t *len, /* output: length of extent */
125 int *stat) /* output: success/failure */
127 union xfs_btree_rec *rec;
128 int error;
130 error = xfs_btree_get_rec(cur, &rec, stat);
131 if (!error && *stat == 1) {
132 *bno = be32_to_cpu(rec->alloc.ar_startblock);
133 *len = be32_to_cpu(rec->alloc.ar_blockcount);
135 return error;
139 * Compute aligned version of the found extent.
140 * Takes alignment and min length into account.
142 STATIC void
143 xfs_alloc_compute_aligned(
144 xfs_alloc_arg_t *args, /* allocation argument structure */
145 xfs_agblock_t foundbno, /* starting block in found extent */
146 xfs_extlen_t foundlen, /* length in found extent */
147 xfs_agblock_t *resbno, /* result block number */
148 xfs_extlen_t *reslen) /* result length */
150 xfs_agblock_t bno;
151 xfs_extlen_t len;
153 /* Trim busy sections out of found extent */
154 xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
156 if (args->alignment > 1 && len >= args->minlen) {
157 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
158 xfs_extlen_t diff = aligned_bno - bno;
160 *resbno = aligned_bno;
161 *reslen = diff >= len ? 0 : len - diff;
162 } else {
163 *resbno = bno;
164 *reslen = len;
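/*
 * Illustrative example (arbitrary values): with alignment = 4, a
 * busy-trimmed extent [bno = 37, len = 20] rounds up to resbno = 40,
 * losing diff = 3 blocks, so reslen = 17.  If the round-up would consume
 * the whole extent, reslen is clamped to 0 and callers treat the extent
 * as unusable.
 */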
169 * Compute best start block and diff for "near" allocations.
170 * freelen >= wantlen already checked by caller.
172 STATIC xfs_extlen_t /* difference value (absolute) */
173 xfs_alloc_compute_diff(
174 xfs_agblock_t wantbno, /* target starting block */
175 xfs_extlen_t wantlen, /* target length */
176 xfs_extlen_t alignment, /* target alignment */
177 xfs_agblock_t freebno, /* freespace's starting block */
178 xfs_extlen_t freelen, /* freespace's length */
179 xfs_agblock_t *newbnop) /* result: best start block from free */
181 xfs_agblock_t freeend; /* end of freespace extent */
182 xfs_agblock_t newbno1; /* return block number */
183 xfs_agblock_t newbno2; /* other new block number */
184 xfs_extlen_t newlen1=0; /* length with newbno1 */
185 xfs_extlen_t newlen2=0; /* length with newbno2 */
186 xfs_agblock_t wantend; /* end of target extent */
188 ASSERT(freelen >= wantlen);
189 freeend = freebno + freelen;
190 wantend = wantbno + wantlen;
191 if (freebno >= wantbno) {
192 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
193 newbno1 = NULLAGBLOCK;
194 } else if (freeend >= wantend && alignment > 1) {
195 newbno1 = roundup(wantbno, alignment);
196 newbno2 = newbno1 - alignment;
197 if (newbno1 >= freeend)
198 newbno1 = NULLAGBLOCK;
199 else
200 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
201 if (newbno2 < freebno)
202 newbno2 = NULLAGBLOCK;
203 else
204 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
205 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
206 if (newlen1 < newlen2 ||
207 (newlen1 == newlen2 &&
208 XFS_ABSDIFF(newbno1, wantbno) >
209 XFS_ABSDIFF(newbno2, wantbno)))
210 newbno1 = newbno2;
211 } else if (newbno2 != NULLAGBLOCK)
212 newbno1 = newbno2;
213 } else if (freeend >= wantend) {
214 newbno1 = wantbno;
215 } else if (alignment > 1) {
216 newbno1 = roundup(freeend - wantlen, alignment);
217 if (newbno1 > freeend - wantlen &&
218 newbno1 - alignment >= freebno)
219 newbno1 -= alignment;
220 else if (newbno1 >= freeend)
221 newbno1 = NULLAGBLOCK;
222 } else
223 newbno1 = freeend - wantlen;
224 *newbnop = newbno1;
225 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
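/*
 * Illustrative example (arbitrary values, alignment = 1): asking for
 * [wantbno = 100, wantlen = 8] against a free extent [90, 40] gives
 * *newbnop = 100 and a diff of 0, since the freespace covers the whole
 * target; against [110, 40] the best start is 110 and the diff is 10.
 */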
229 * Fix up the length, based on mod and prod.
230 * len should be k * prod + mod for some k.
231 * If len is too small it is returned unchanged.
232 * If len hits maxlen it is left alone.
234 STATIC void
235 xfs_alloc_fix_len(
236 xfs_alloc_arg_t *args) /* allocation argument structure */
238 xfs_extlen_t k;
239 xfs_extlen_t rlen;
241 ASSERT(args->mod < args->prod);
242 rlen = args->len;
243 ASSERT(rlen >= args->minlen);
244 ASSERT(rlen <= args->maxlen);
245 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
246 (args->mod == 0 && rlen < args->prod))
247 return;
248 k = rlen % args->prod;
249 if (k == args->mod)
250 return;
251 if (k > args->mod) {
252 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
253 return;
254 } else {
255 if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
256 (int)args->minlen)
257 return;
259 ASSERT(rlen >= args->minlen);
260 ASSERT(rlen <= args->maxlen);
261 args->len = rlen;
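/*
 * Illustrative example (arbitrary values): with prod = 16, mod = 0 and
 * len = 100, k = 4 and the length is trimmed to 96, the largest multiple
 * of prod not exceeding the original length.  If prod <= 1, or trimming
 * would drop below minlen, the length is left unchanged.
 */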
265 * Fix up length if there is too little space left in the a.g.
266 * Return 1 if ok, 0 if too little, should give up.
268 STATIC int
269 xfs_alloc_fix_minleft(
270 xfs_alloc_arg_t *args) /* allocation argument structure */
272 xfs_agf_t *agf; /* a.g. freelist header */
273 int diff; /* free space difference */
275 if (args->minleft == 0)
276 return 1;
277 agf = XFS_BUF_TO_AGF(args->agbp);
278 diff = be32_to_cpu(agf->agf_freeblks)
279 - args->len - args->minleft;
280 if (diff >= 0)
281 return 1;
282 args->len += diff; /* shrink the allocated space */
283 if (args->len >= args->minlen)
284 return 1;
285 args->agbno = NULLAGBLOCK;
286 return 0;
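/*
 * Illustrative example (arbitrary values): with agf_freeblks = 100,
 * args->len = 60 and minleft = 50, diff = -10 and the length is shrunk
 * to 50.  If the shrunken length no longer satisfies minlen, agbno is
 * set to NULLAGBLOCK and 0 is returned so the caller gives up on this AG.
 */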
290 * Update the two btrees, logically removing from freespace the extent
291 * starting at rbno, rlen blocks. The extent is contained within the
292 * actual (current) free extent fbno for flen blocks.
293 * Flags are passed in indicating whether the cursors are set to the
294 * relevant records.
296 STATIC int /* error code */
297 xfs_alloc_fixup_trees(
298 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
299 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
300 xfs_agblock_t fbno, /* starting block of free extent */
301 xfs_extlen_t flen, /* length of free extent */
302 xfs_agblock_t rbno, /* starting block of returned extent */
303 xfs_extlen_t rlen, /* length of returned extent */
304 int flags) /* flags, XFSA_FIXUP_... */
306 int error; /* error code */
307 int i; /* operation results */
308 xfs_agblock_t nfbno1; /* first new free startblock */
309 xfs_agblock_t nfbno2; /* second new free startblock */
310 xfs_extlen_t nflen1=0; /* first new free length */
311 xfs_extlen_t nflen2=0; /* second new free length */
314 * Look up the record in the by-size tree if necessary.
316 if (flags & XFSA_FIXUP_CNT_OK) {
317 #ifdef DEBUG
318 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
319 return error;
320 XFS_WANT_CORRUPTED_RETURN(
321 i == 1 && nfbno1 == fbno && nflen1 == flen);
322 #endif
323 } else {
324 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
325 return error;
326 XFS_WANT_CORRUPTED_RETURN(i == 1);
329 * Look up the record in the by-block tree if necessary.
331 if (flags & XFSA_FIXUP_BNO_OK) {
332 #ifdef DEBUG
333 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
334 return error;
335 XFS_WANT_CORRUPTED_RETURN(
336 i == 1 && nfbno1 == fbno && nflen1 == flen);
337 #endif
338 } else {
339 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
340 return error;
341 XFS_WANT_CORRUPTED_RETURN(i == 1);
344 #ifdef DEBUG
345 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
346 struct xfs_btree_block *bnoblock;
347 struct xfs_btree_block *cntblock;
349 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
350 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
352 XFS_WANT_CORRUPTED_RETURN(
353 bnoblock->bb_numrecs == cntblock->bb_numrecs);
355 #endif
358 * Deal with all four cases: the allocated record is contained
359 * within the freespace record, so we can have new freespace
360 * at either (or both) end, or no freespace remaining.
362 if (rbno == fbno && rlen == flen)
363 nfbno1 = nfbno2 = NULLAGBLOCK;
364 else if (rbno == fbno) {
365 nfbno1 = rbno + rlen;
366 nflen1 = flen - rlen;
367 nfbno2 = NULLAGBLOCK;
368 } else if (rbno + rlen == fbno + flen) {
369 nfbno1 = fbno;
370 nflen1 = flen - rlen;
371 nfbno2 = NULLAGBLOCK;
372 } else {
373 nfbno1 = fbno;
374 nflen1 = rbno - fbno;
375 nfbno2 = rbno + rlen;
376 nflen2 = (fbno + flen) - nfbno2;
379 * Delete the entry from the by-size btree.
381 if ((error = xfs_btree_delete(cnt_cur, &i)))
382 return error;
383 XFS_WANT_CORRUPTED_RETURN(i == 1);
385 * Add new by-size btree entry(s).
387 if (nfbno1 != NULLAGBLOCK) {
388 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
389 return error;
390 XFS_WANT_CORRUPTED_RETURN(i == 0);
391 if ((error = xfs_btree_insert(cnt_cur, &i)))
392 return error;
393 XFS_WANT_CORRUPTED_RETURN(i == 1);
395 if (nfbno2 != NULLAGBLOCK) {
396 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
397 return error;
398 XFS_WANT_CORRUPTED_RETURN(i == 0);
399 if ((error = xfs_btree_insert(cnt_cur, &i)))
400 return error;
401 XFS_WANT_CORRUPTED_RETURN(i == 1);
404 * Fix up the by-block btree entry(s).
406 if (nfbno1 == NULLAGBLOCK) {
408 * No remaining freespace, just delete the by-block tree entry.
410 if ((error = xfs_btree_delete(bno_cur, &i)))
411 return error;
412 XFS_WANT_CORRUPTED_RETURN(i == 1);
413 } else {
415 * Update the by-block entry to start later|be shorter.
417 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
418 return error;
420 if (nfbno2 != NULLAGBLOCK) {
422 * 2 resulting free entries, need to add one.
424 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
425 return error;
426 XFS_WANT_CORRUPTED_RETURN(i == 0);
427 if ((error = xfs_btree_insert(bno_cur, &i)))
428 return error;
429 XFS_WANT_CORRUPTED_RETURN(i == 1);
431 return 0;
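/*
 * Illustrative example of the four cases above (arbitrary values):
 * carving [rbno = 54, rlen = 8] out of the free record [fbno = 50,
 * flen = 20] leaves two remainders, [50, 4] and [62, 8], which are
 * inserted into both btrees.  If the allocation starts at fbno or ends
 * at fbno + flen only one remainder is needed, and an exact match
 * deletes the record outright.
 */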
435 * Read in the allocation group free block array.
437 STATIC int /* error */
438 xfs_alloc_read_agfl(
439 xfs_mount_t *mp, /* mount point structure */
440 xfs_trans_t *tp, /* transaction pointer */
441 xfs_agnumber_t agno, /* allocation group number */
442 xfs_buf_t **bpp) /* buffer for the ag free block array */
444 xfs_buf_t *bp; /* return value */
445 int error;
447 ASSERT(agno != NULLAGNUMBER);
448 error = xfs_trans_read_buf(
449 mp, tp, mp->m_ddev_targp,
450 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
451 XFS_FSS_TO_BB(mp, 1), 0, &bp);
452 if (error)
453 return error;
454 ASSERT(bp);
455 ASSERT(!XFS_BUF_GETERROR(bp));
456 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
457 *bpp = bp;
458 return 0;
461 STATIC int
462 xfs_alloc_update_counters(
463 struct xfs_trans *tp,
464 struct xfs_perag *pag,
465 struct xfs_buf *agbp,
466 long len)
468 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
470 pag->pagf_freeblks += len;
471 be32_add_cpu(&agf->agf_freeblks, len);
473 xfs_trans_agblocks_delta(tp, len);
474 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
475 be32_to_cpu(agf->agf_length)))
476 return EFSCORRUPTED;
478 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
479 return 0;
483 * Allocation group level functions.
487 * Allocate a variable extent in the allocation group agno.
488 * Type and bno are used to determine where in the allocation group the
489 * extent will start.
490 * Extent's length (returned in *len) will be between minlen and maxlen,
491 * and of the form k * prod + mod unless there's nothing that large.
492 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
494 STATIC int /* error */
495 xfs_alloc_ag_vextent(
496 xfs_alloc_arg_t *args) /* argument structure for allocation */
498 int error=0;
500 ASSERT(args->minlen > 0);
501 ASSERT(args->maxlen > 0);
502 ASSERT(args->minlen <= args->maxlen);
503 ASSERT(args->mod < args->prod);
504 ASSERT(args->alignment > 0);
506 * Branch to correct routine based on the type.
508 args->wasfromfl = 0;
509 switch (args->type) {
510 case XFS_ALLOCTYPE_THIS_AG:
511 error = xfs_alloc_ag_vextent_size(args);
512 break;
513 case XFS_ALLOCTYPE_NEAR_BNO:
514 error = xfs_alloc_ag_vextent_near(args);
515 break;
516 case XFS_ALLOCTYPE_THIS_BNO:
517 error = xfs_alloc_ag_vextent_exact(args);
518 break;
519 default:
520 ASSERT(0);
521 /* NOTREACHED */
524 if (error || args->agbno == NULLAGBLOCK)
525 return error;
527 ASSERT(args->len >= args->minlen);
528 ASSERT(args->len <= args->maxlen);
529 ASSERT(!args->wasfromfl || !args->isfl);
530 ASSERT(args->agbno % args->alignment == 0);
532 if (!args->wasfromfl) {
533 error = xfs_alloc_update_counters(args->tp, args->pag,
534 args->agbp,
535 -((long)(args->len)));
536 if (error)
537 return error;
539 ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
540 args->agbno, args->len));
543 if (!args->isfl) {
544 xfs_trans_mod_sb(args->tp, args->wasdel ?
545 XFS_TRANS_SB_RES_FDBLOCKS :
546 XFS_TRANS_SB_FDBLOCKS,
547 -((long)(args->len)));
550 XFS_STATS_INC(xs_allocx);
551 XFS_STATS_ADD(xs_allocb, args->len);
552 return error;
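/*
 * Note on the dispatch above: XFS_ALLOCTYPE_THIS_BNO demands the extent
 * start exactly at args->agbno, XFS_ALLOCTYPE_NEAR_BNO prefers space
 * close to it, and XFS_ALLOCTYPE_THIS_AG takes the best-sized space
 * anywhere in the AG.  A caller extending an existing file extent will
 * typically try THIS_BNO first for contiguity and fall back to a
 * NEAR_BNO allocation if that exact block is taken (a convention of the
 * callers, not enforced here).
 */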
556 * Allocate a variable extent at exactly agno/bno.
557 * Extent's length (returned in *len) will be between minlen and maxlen,
558 * and of the form k * prod + mod unless there's nothing that large.
559 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
561 STATIC int /* error */
562 xfs_alloc_ag_vextent_exact(
563 xfs_alloc_arg_t *args) /* allocation argument structure */
565 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
566 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
567 int error;
568 xfs_agblock_t fbno; /* start block of found extent */
569 xfs_extlen_t flen; /* length of found extent */
570 xfs_agblock_t tbno; /* start block of trimmed extent */
571 xfs_extlen_t tlen; /* length of trimmed extent */
572 xfs_agblock_t tend; /* end block of trimmed extent */
573 xfs_agblock_t end; /* end of allocated extent */
574 int i; /* success/failure of operation */
575 xfs_extlen_t rlen; /* length of returned extent */
577 ASSERT(args->alignment == 1);
580 * Allocate/initialize a cursor for the by-number freespace btree.
582 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
583 args->agno, XFS_BTNUM_BNO);
586 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
587 * Look for the closest free block <= bno, it must contain bno
588 * if any free block does.
590 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
591 if (error)
592 goto error0;
593 if (!i)
594 goto not_found;
597 * Grab the freespace record.
599 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
600 if (error)
601 goto error0;
602 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
603 ASSERT(fbno <= args->agbno);
606 * Check for overlapping busy extents.
608 xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
611 * Give up if the start of the extent is busy, or the freespace isn't
612 * long enough for the minimum request.
614 if (tbno > args->agbno)
615 goto not_found;
616 if (tlen < args->minlen)
617 goto not_found;
618 tend = tbno + tlen;
619 if (tend < args->agbno + args->minlen)
620 goto not_found;
623 * End of extent will be smaller of the freespace end and the
624 * maximal requested end.
626 * Fix the length according to mod and prod if given.
628 end = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen);
629 args->len = end - args->agbno;
630 xfs_alloc_fix_len(args);
631 if (!xfs_alloc_fix_minleft(args))
632 goto not_found;
634 rlen = args->len;
635 ASSERT(args->agbno + rlen <= tend);
636 end = args->agbno + rlen;
639 * We are allocating agbno for rlen [agbno .. end]
640 * Allocate/initialize a cursor for the by-size btree.
642 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
643 args->agno, XFS_BTNUM_CNT);
644 ASSERT(args->agbno + args->len <=
645 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
646 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
647 args->len, XFSA_FIXUP_BNO_OK);
648 if (error) {
649 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
650 goto error0;
653 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
654 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
656 args->wasfromfl = 0;
657 trace_xfs_alloc_exact_done(args);
658 return 0;
660 not_found:
661 /* Didn't find it, return null. */
662 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
663 args->agbno = NULLAGBLOCK;
664 trace_xfs_alloc_exact_notfound(args);
665 return 0;
667 error0:
668 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
669 trace_xfs_alloc_exact_error(args);
670 return error;
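/*
 * Illustrative example (arbitrary values): with args->agbno = 100,
 * minlen = 4 and maxlen = 16, a by-bno record [96, 32] with nothing busy
 * trims to itself; end = min(128, 116) = 116, so 16 blocks at 100..115
 * are allocated and xfs_alloc_fixup_trees() leaves [96, 4] and [116, 12]
 * behind.
 */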
674 * Search the btree in a given direction via the search cursor and compare
675 * the records found against the good extent we've already found.
677 STATIC int
678 xfs_alloc_find_best_extent(
679 struct xfs_alloc_arg *args, /* allocation argument structure */
680 struct xfs_btree_cur **gcur, /* good cursor */
681 struct xfs_btree_cur **scur, /* searching cursor */
682 xfs_agblock_t gdiff, /* difference for search comparison */
683 xfs_agblock_t *sbno, /* extent found by search */
684 xfs_extlen_t *slen, /* extent length */
685 xfs_agblock_t *sbnoa, /* aligned extent found by search */
686 xfs_extlen_t *slena, /* aligned extent length */
687 int dir) /* 0 = search right, 1 = search left */
689 xfs_agblock_t new;
690 xfs_agblock_t sdiff;
691 int error;
692 int i;
694 /* The good extent is perfect, no need to search. */
695 if (!gdiff)
696 goto out_use_good;
699 * Look until we find a better one, run out of space or run off the end.
701 do {
702 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
703 if (error)
704 goto error0;
705 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
706 xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
709 * The good extent is closer than this one.
711 if (!dir) {
712 if (*sbnoa >= args->agbno + gdiff)
713 goto out_use_good;
714 } else {
715 if (*sbnoa <= args->agbno - gdiff)
716 goto out_use_good;
720 * Same distance, compare length and pick the best.
722 if (*slena >= args->minlen) {
723 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
724 xfs_alloc_fix_len(args);
726 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
727 args->alignment, *sbnoa,
728 *slena, &new);
731 * Choose closer size and invalidate other cursor.
733 if (sdiff < gdiff)
734 goto out_use_search;
735 goto out_use_good;
738 if (!dir)
739 error = xfs_btree_increment(*scur, 0, &i);
740 else
741 error = xfs_btree_decrement(*scur, 0, &i);
742 if (error)
743 goto error0;
744 } while (i);
746 out_use_good:
747 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
748 *scur = NULL;
749 return 0;
751 out_use_search:
752 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
753 *gcur = NULL;
754 return 0;
756 error0:
757 /* caller invalidates cursors */
758 return error;
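/*
 * Note on the cut-off tests above: by-bno records are visited in
 * increasing block order, so when searching right (dir == 0), once the
 * aligned start *sbnoa is already gdiff or more past args->agbno no
 * later record can beat the already-found extent and the search side is
 * abandoned.  The leftward search (dir == 1) uses the mirror-image test.
 */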
762 * Allocate a variable extent near bno in the allocation group agno.
763 * Extent's length (returned in len) will be between minlen and maxlen,
764 * and of the form k * prod + mod unless there's nothing that large.
765 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
767 STATIC int /* error */
768 xfs_alloc_ag_vextent_near(
769 xfs_alloc_arg_t *args) /* allocation argument structure */
771 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
772 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
773 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
774 xfs_agblock_t gtbno; /* start bno of right side entry */
775 xfs_agblock_t gtbnoa; /* aligned ... */
776 xfs_extlen_t gtdiff; /* difference to right side entry */
777 xfs_extlen_t gtlen; /* length of right side entry */
778 xfs_extlen_t gtlena; /* aligned ... */
779 xfs_agblock_t gtnew; /* useful start bno of right side */
780 int error; /* error code */
781 int i; /* result code, temporary */
782 int j; /* result code, temporary */
783 xfs_agblock_t ltbno; /* start bno of left side entry */
784 xfs_agblock_t ltbnoa; /* aligned ... */
785 xfs_extlen_t ltdiff; /* difference to left side entry */
786 xfs_extlen_t ltlen; /* length of left side entry */
787 xfs_extlen_t ltlena; /* aligned ... */
788 xfs_agblock_t ltnew; /* useful start bno of left side */
789 xfs_extlen_t rlen; /* length of returned extent */
790 int forced = 0;
791 #if defined(DEBUG) && defined(__KERNEL__)
793 * Randomly don't execute the first algorithm.
795 int dofirst; /* set to do first algorithm */
797 dofirst = random32() & 1;
798 #endif
800 restart:
801 bno_cur_lt = NULL;
802 bno_cur_gt = NULL;
803 ltlen = 0;
804 gtlena = 0;
805 ltlena = 0;
808 * Get a cursor for the by-size btree.
810 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
811 args->agno, XFS_BTNUM_CNT);
814 * See if there are any free extents as big as maxlen.
816 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
817 goto error0;
819 * If none, then pick up the last entry in the tree unless the
820 * tree is empty.
822 if (!i) {
823 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
824 &ltlen, &i)))
825 goto error0;
826 if (i == 0 || ltlen == 0) {
827 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
828 trace_xfs_alloc_near_noentry(args);
829 return 0;
831 ASSERT(i == 1);
833 args->wasfromfl = 0;
836 * First algorithm.
837 * If the requested extent is large wrt the freespaces available
838 * in this a.g., then the cursor will be pointing to a btree entry
839 * near the right edge of the tree. If it's in the last btree leaf
840 * block, then we just examine all the entries in that block
841 * that are big enough, and pick the best one.
842 * This is written as a while loop so we can break out of it,
843 * but we never loop back to the top.
845 while (xfs_btree_islastblock(cnt_cur, 0)) {
846 xfs_extlen_t bdiff;
847 int besti=0;
848 xfs_extlen_t blen=0;
849 xfs_agblock_t bnew=0;
851 #if defined(DEBUG) && defined(__KERNEL__)
852 if (!dofirst)
853 break;
854 #endif
856 * Start from the entry that lookup found, sequence through
857 * all larger free blocks. If we're actually pointing at a
858 * record smaller than maxlen, go to the start of this block,
859 * and skip all those smaller than minlen.
861 if (ltlen || args->alignment > 1) {
862 cnt_cur->bc_ptrs[0] = 1;
863 do {
864 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
865 &ltlen, &i)))
866 goto error0;
867 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
868 if (ltlen >= args->minlen)
869 break;
870 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
871 goto error0;
872 } while (i);
873 ASSERT(ltlen >= args->minlen);
874 if (!i)
875 break;
877 i = cnt_cur->bc_ptrs[0];
878 for (j = 1, blen = 0, bdiff = 0;
879 !error && j && (blen < args->maxlen || bdiff > 0);
880 error = xfs_btree_increment(cnt_cur, 0, &j)) {
882 * For each entry, decide if it's better than
883 * the previous best entry.
885 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
886 goto error0;
887 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
888 xfs_alloc_compute_aligned(args, ltbno, ltlen,
889 &ltbnoa, &ltlena);
890 if (ltlena < args->minlen)
891 continue;
892 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
893 xfs_alloc_fix_len(args);
894 ASSERT(args->len >= args->minlen);
895 if (args->len < blen)
896 continue;
897 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
898 args->alignment, ltbnoa, ltlena, &ltnew);
899 if (ltnew != NULLAGBLOCK &&
900 (args->len > blen || ltdiff < bdiff)) {
901 bdiff = ltdiff;
902 bnew = ltnew;
903 blen = args->len;
904 besti = cnt_cur->bc_ptrs[0];
908 * It didn't work. We COULD be in a case where
909 * there's a good record somewhere, so try again.
911 if (blen == 0)
912 break;
914 * Point at the best entry, and retrieve it again.
916 cnt_cur->bc_ptrs[0] = besti;
917 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
918 goto error0;
919 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
920 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
921 args->len = blen;
922 if (!xfs_alloc_fix_minleft(args)) {
923 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
924 trace_xfs_alloc_near_nominleft(args);
925 return 0;
927 blen = args->len;
929 * We are allocating starting at bnew for blen blocks.
931 args->agbno = bnew;
932 ASSERT(bnew >= ltbno);
933 ASSERT(bnew + blen <= ltbno + ltlen);
935 * Set up a cursor for the by-bno tree.
937 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
938 args->agbp, args->agno, XFS_BTNUM_BNO);
940 * Fix up the btree entries.
942 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
943 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
944 goto error0;
945 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
946 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
948 trace_xfs_alloc_near_first(args);
949 return 0;
952 * Second algorithm.
953 * Search in the by-bno tree to the left and to the right
954 * simultaneously, until in each case we find a space big enough,
955 * or run into the edge of the tree. When we run into the edge,
956 * we deallocate that cursor.
957 * If both searches succeed, we compare the two spaces and pick
958 * the better one.
959 * With alignment, it's possible for both to fail; the upper
960 * level algorithm that picks allocation groups for allocations
961 * is not supposed to do this.
964 * Allocate and initialize the cursor for the leftward search.
966 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
967 args->agno, XFS_BTNUM_BNO);
969 * Lookup <= bno to find the leftward search's starting point.
971 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
972 goto error0;
973 if (!i) {
975 * Didn't find anything; use this cursor for the rightward
976 * search.
978 bno_cur_gt = bno_cur_lt;
979 bno_cur_lt = NULL;
982 * Found something. Duplicate the cursor for the rightward search.
984 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
985 goto error0;
987 * Increment the cursor, so we will point at the entry just right
988 * of the leftward entry if any, or to the leftmost entry.
990 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
991 goto error0;
992 if (!i) {
994 * It failed, there are no rightward entries.
996 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
997 bno_cur_gt = NULL;
1000 * Loop going left with the leftward cursor, right with the
1001 * rightward cursor, until either both directions give up or
1002 * we find an entry at least as big as minlen.
1004 do {
1005 if (bno_cur_lt) {
1006 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1007 goto error0;
1008 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1009 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1010 &ltbnoa, &ltlena);
1011 if (ltlena >= args->minlen)
1012 break;
1013 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1014 goto error0;
1015 if (!i) {
1016 xfs_btree_del_cursor(bno_cur_lt,
1017 XFS_BTREE_NOERROR);
1018 bno_cur_lt = NULL;
1021 if (bno_cur_gt) {
1022 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1023 goto error0;
1024 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1025 xfs_alloc_compute_aligned(args, gtbno, gtlen,
1026 &gtbnoa, &gtlena);
1027 if (gtlena >= args->minlen)
1028 break;
1029 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1030 goto error0;
1031 if (!i) {
1032 xfs_btree_del_cursor(bno_cur_gt,
1033 XFS_BTREE_NOERROR);
1034 bno_cur_gt = NULL;
1037 } while (bno_cur_lt || bno_cur_gt);
1040 * Got both cursors still active, need to find better entry.
1042 if (bno_cur_lt && bno_cur_gt) {
1043 if (ltlena >= args->minlen) {
1045 * Left side is good, look for a right side entry.
1047 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1048 xfs_alloc_fix_len(args);
1049 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1050 args->alignment, ltbnoa, ltlena, &ltnew);
1052 error = xfs_alloc_find_best_extent(args,
1053 &bno_cur_lt, &bno_cur_gt,
1054 ltdiff, &gtbno, &gtlen,
1055 &gtbnoa, &gtlena,
1056 0 /* search right */);
1057 } else {
1058 ASSERT(gtlena >= args->minlen);
1061 * Right side is good, look for a left side entry.
1063 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1064 xfs_alloc_fix_len(args);
1065 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1066 args->alignment, gtbnoa, gtlena, &gtnew);
1068 error = xfs_alloc_find_best_extent(args,
1069 &bno_cur_gt, &bno_cur_lt,
1070 gtdiff, &ltbno, &ltlen,
1071 &ltbnoa, &ltlena,
1072 1 /* search left */);
1075 if (error)
1076 goto error0;
1080 * If we couldn't get anything, give up.
1082 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1083 if (!forced++) {
1084 trace_xfs_alloc_near_busy(args);
1085 xfs_log_force(args->mp, XFS_LOG_SYNC);
1086 goto restart;
1089 trace_xfs_alloc_size_neither(args);
1090 args->agbno = NULLAGBLOCK;
1091 return 0;
1095 * At this point we have selected a freespace entry, either to the
1096 * left or to the right. If it's on the right, copy all the
1097 * useful variables to the "left" set so we only have one
1098 * copy of this code.
1100 if (bno_cur_gt) {
1101 bno_cur_lt = bno_cur_gt;
1102 bno_cur_gt = NULL;
1103 ltbno = gtbno;
1104 ltbnoa = gtbnoa;
1105 ltlen = gtlen;
1106 ltlena = gtlena;
1107 j = 1;
1108 } else
1109 j = 0;
1112 * Fix up the length and compute the useful address.
1114 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1115 xfs_alloc_fix_len(args);
1116 if (!xfs_alloc_fix_minleft(args)) {
1117 trace_xfs_alloc_near_nominleft(args);
1118 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1119 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1120 return 0;
1122 rlen = args->len;
1123 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1124 ltbnoa, ltlena, &ltnew);
1125 ASSERT(ltnew >= ltbno);
1126 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1127 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1128 args->agbno = ltnew;
1130 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1131 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1132 goto error0;
1134 if (j)
1135 trace_xfs_alloc_near_greater(args);
1136 else
1137 trace_xfs_alloc_near_lesser(args);
1139 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1140 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1141 return 0;
1143 error0:
1144 trace_xfs_alloc_near_error(args);
1145 if (cnt_cur != NULL)
1146 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1147 if (bno_cur_lt != NULL)
1148 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1149 if (bno_cur_gt != NULL)
1150 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1151 return error;
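/*
 * Illustrative example of the second algorithm (arbitrary values,
 * alignment = 1): with a target of agbno = 1000, maxlen = 8 and free
 * extents [900, 16] and [1040, 8], the left candidate's best start is
 * 908 (diff 92) and the right candidate's is 1040 (diff 40), so
 * xfs_alloc_find_best_extent() keeps the right-side cursor and its
 * values are copied into the lt variables before the trees are fixed up.
 */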
1155 * Allocate a variable extent anywhere in the allocation group agno.
1156 * Extent's length (returned in len) will be between minlen and maxlen,
1157 * and of the form k * prod + mod unless there's nothing that large.
1158 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1160 STATIC int /* error */
1161 xfs_alloc_ag_vextent_size(
1162 xfs_alloc_arg_t *args) /* allocation argument structure */
1164 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1165 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1166 int error; /* error result */
1167 xfs_agblock_t fbno; /* start of found freespace */
1168 xfs_extlen_t flen; /* length of found freespace */
1169 int i; /* temp status variable */
1170 xfs_agblock_t rbno; /* returned block number */
1171 xfs_extlen_t rlen; /* length of returned extent */
1172 int forced = 0;
1174 restart:
1176 * Allocate and initialize a cursor for the by-size btree.
1178 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1179 args->agno, XFS_BTNUM_CNT);
1180 bno_cur = NULL;
1183 * Look for an entry >= maxlen+alignment-1 blocks.
1185 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1186 args->maxlen + args->alignment - 1, &i)))
1187 goto error0;
1190 * If none or we have busy extents that we cannot allocate from, then
1191 * we have to settle for a smaller extent. In the case that there are
1192 * no large extents, this will return the last entry in the tree unless
1193 * the tree is empty. In the case that there are only busy large
1194 * extents, this will return the largest small extent unless there
1195 * are no smaller extents available.
1197 if (!i || forced > 1) {
1198 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1199 &fbno, &flen, &i);
1200 if (error)
1201 goto error0;
1202 if (i == 0 || flen == 0) {
1203 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1204 trace_xfs_alloc_size_noentry(args);
1205 return 0;
1207 ASSERT(i == 1);
1208 xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
1209 } else {
1211 * Search for a non-busy extent that is large enough.
1212 * If we are at low space, don't check, or if we fall off
1213 * the end of the btree, turn off the busy check and
1214 * restart.
1216 for (;;) {
1217 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1218 if (error)
1219 goto error0;
1220 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1222 xfs_alloc_compute_aligned(args, fbno, flen,
1223 &rbno, &rlen);
1225 if (rlen >= args->maxlen)
1226 break;
1228 error = xfs_btree_increment(cnt_cur, 0, &i);
1229 if (error)
1230 goto error0;
1231 if (i == 0) {
1233 * Our only valid extents must have been busy.
1234 * Make it unbusy by forcing the log out and
1235 * retrying. If we've been here before, forcing
1236 * the log isn't making the extents available,
1237 * which means they have probably been freed in
1238 * this transaction. In that case, we have to
1239 * give up on them and we'll attempt a minlen
1240 * allocation the next time around.
1242 xfs_btree_del_cursor(cnt_cur,
1243 XFS_BTREE_NOERROR);
1244 trace_xfs_alloc_size_busy(args);
1245 if (!forced++)
1246 xfs_log_force(args->mp, XFS_LOG_SYNC);
1247 goto restart;
1253 * In the first case above, we got the last entry in the
1254 * by-size btree. Now we check to see if the space hits maxlen
1255 * once aligned; if not, we search left for something better.
1256 * This can't happen in the second case above.
1258 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1259 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1260 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1261 if (rlen < args->maxlen) {
1262 xfs_agblock_t bestfbno;
1263 xfs_extlen_t bestflen;
1264 xfs_agblock_t bestrbno;
1265 xfs_extlen_t bestrlen;
1267 bestrlen = rlen;
1268 bestrbno = rbno;
1269 bestflen = flen;
1270 bestfbno = fbno;
1271 for (;;) {
1272 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1273 goto error0;
1274 if (i == 0)
1275 break;
1276 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1277 &i)))
1278 goto error0;
1279 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1280 if (flen < bestrlen)
1281 break;
1282 xfs_alloc_compute_aligned(args, fbno, flen,
1283 &rbno, &rlen);
1284 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1285 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1286 (rlen <= flen && rbno + rlen <= fbno + flen),
1287 error0);
1288 if (rlen > bestrlen) {
1289 bestrlen = rlen;
1290 bestrbno = rbno;
1291 bestflen = flen;
1292 bestfbno = fbno;
1293 if (rlen == args->maxlen)
1294 break;
1297 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1298 &i)))
1299 goto error0;
1300 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1301 rlen = bestrlen;
1302 rbno = bestrbno;
1303 flen = bestflen;
1304 fbno = bestfbno;
1306 args->wasfromfl = 0;
1308 * Fix up the length.
1310 args->len = rlen;
1311 if (rlen < args->minlen) {
1312 if (!forced++) {
1313 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1314 trace_xfs_alloc_size_busy(args);
1315 xfs_log_force(args->mp, XFS_LOG_SYNC);
1316 goto restart;
1318 goto out_nominleft;
1320 xfs_alloc_fix_len(args);
1322 if (!xfs_alloc_fix_minleft(args))
1323 goto out_nominleft;
1324 rlen = args->len;
1325 XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
1327 * Allocate and initialize a cursor for the by-block tree.
1329 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1330 args->agno, XFS_BTNUM_BNO);
1331 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1332 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1333 goto error0;
1334 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1335 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1336 cnt_cur = bno_cur = NULL;
1337 args->len = rlen;
1338 args->agbno = rbno;
1339 XFS_WANT_CORRUPTED_GOTO(
1340 args->agbno + args->len <=
1341 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1342 error0);
1343 trace_xfs_alloc_size_done(args);
1344 return 0;
1346 error0:
1347 trace_xfs_alloc_size_error(args);
1348 if (cnt_cur)
1349 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1350 if (bno_cur)
1351 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1352 return error;
1354 out_nominleft:
1355 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1356 trace_xfs_alloc_size_nominleft(args);
1357 args->agbno = NULLAGBLOCK;
1358 return 0;
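/*
 * Note on the retry logic above: the first pass (forced == 0) trims busy
 * extents out of every candidate; if nothing usable remains, the log is
 * forced synchronously and the search restarts.  If that still fails
 * (forced > 1), the code settles for whatever xfs_alloc_ag_vextent_small()
 * can provide, possibly a single block taken from the AGFL.
 */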
1362 * Deal with the case where only small freespaces remain.
1363 * Either return the contents of the last freespace record,
1364 * or allocate space from the freelist if there is nothing in the tree.
1366 STATIC int /* error */
1367 xfs_alloc_ag_vextent_small(
1368 xfs_alloc_arg_t *args, /* allocation argument structure */
1369 xfs_btree_cur_t *ccur, /* by-size cursor */
1370 xfs_agblock_t *fbnop, /* result block number */
1371 xfs_extlen_t *flenp, /* result length */
1372 int *stat) /* status: 0-freelist, 1-normal/none */
1374 int error;
1375 xfs_agblock_t fbno;
1376 xfs_extlen_t flen;
1377 int i;
1379 if ((error = xfs_btree_decrement(ccur, 0, &i)))
1380 goto error0;
1381 if (i) {
1382 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1383 goto error0;
1384 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1387 * Nothing in the btree, try the freelist. Make sure
1388 * to respect minleft even when pulling from the
1389 * freelist.
1391 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1392 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1393 > args->minleft)) {
1394 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1395 if (error)
1396 goto error0;
1397 if (fbno != NULLAGBLOCK) {
1398 xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
1399 args->userdata);
1401 if (args->userdata) {
1402 xfs_buf_t *bp;
1404 bp = xfs_btree_get_bufs(args->mp, args->tp,
1405 args->agno, fbno, 0);
1406 xfs_trans_binval(args->tp, bp);
1408 args->len = 1;
1409 args->agbno = fbno;
1410 XFS_WANT_CORRUPTED_GOTO(
1411 args->agbno + args->len <=
1412 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1413 error0);
1414 args->wasfromfl = 1;
1415 trace_xfs_alloc_small_freelist(args);
1416 *stat = 0;
1417 return 0;
1420 * Nothing in the freelist.
1422 else
1423 flen = 0;
1426 * Can't allocate from the freelist for some reason.
1428 else {
1429 fbno = NULLAGBLOCK;
1430 flen = 0;
1433 * Can't do the allocation, give up.
1435 if (flen < args->minlen) {
1436 args->agbno = NULLAGBLOCK;
1437 trace_xfs_alloc_small_notenough(args);
1438 flen = 0;
1440 *fbnop = fbno;
1441 *flenp = flen;
1442 *stat = 1;
1443 trace_xfs_alloc_small_done(args);
1444 return 0;
1446 error0:
1447 trace_xfs_alloc_small_error(args);
1448 return error;
1452 * Free the extent starting at agno/bno for length.
1454 STATIC int /* error */
1455 xfs_free_ag_extent(
1456 xfs_trans_t *tp, /* transaction pointer */
1457 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
1458 xfs_agnumber_t agno, /* allocation group number */
1459 xfs_agblock_t bno, /* starting block number */
1460 xfs_extlen_t len, /* length of extent */
1461 int isfl) /* set if is freelist blocks - no sb acctg */
1463 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1464 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1465 int error; /* error return value */
1466 xfs_agblock_t gtbno; /* start of right neighbor block */
1467 xfs_extlen_t gtlen; /* length of right neighbor block */
1468 int haveleft; /* have a left neighbor block */
1469 int haveright; /* have a right neighbor block */
1470 int i; /* temp, result code */
1471 xfs_agblock_t ltbno; /* start of left neighbor block */
1472 xfs_extlen_t ltlen; /* length of left neighbor block */
1473 xfs_mount_t *mp; /* mount point struct for filesystem */
1474 xfs_agblock_t nbno; /* new starting block of freespace */
1475 xfs_extlen_t nlen; /* new length of freespace */
1476 xfs_perag_t *pag; /* per allocation group data */
1478 mp = tp->t_mountp;
1480 * Allocate and initialize a cursor for the by-block btree.
1482 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1483 cnt_cur = NULL;
1485 * Look for a neighboring block on the left (lower block numbers)
1486 * that is contiguous with this space.
1488 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1489 goto error0;
1490 if (haveleft) {
1492 * There is a block to our left.
1494 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1495 goto error0;
1496 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1498 * It's not contiguous, though.
1500 if (ltbno + ltlen < bno)
1501 haveleft = 0;
1502 else {
1504 * If this failure happens the request to free this
1505 * space was invalid, it's (partly) already free.
1506 * Very bad.
1508 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1512 * Look for a neighboring block on the right (higher block numbers)
1513 * that is contiguous with this space.
1515 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1516 goto error0;
1517 if (haveright) {
1519 * There is a block to our right.
1521 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1522 goto error0;
1523 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1525 * It's not contiguous, though.
1527 if (bno + len < gtbno)
1528 haveright = 0;
1529 else {
1531 * If this failure happens the request to free this
1532 * space was invalid, it's (partly) already free.
1533 * Very bad.
1535 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1539 * Now allocate and initialize a cursor for the by-size tree.
1541 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1543 * Have both left and right contiguous neighbors.
1544 * Merge all three into a single free block.
1546 if (haveleft && haveright) {
1548 * Delete the old by-size entry on the left.
1550 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1551 goto error0;
1552 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1553 if ((error = xfs_btree_delete(cnt_cur, &i)))
1554 goto error0;
1555 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1557 * Delete the old by-size entry on the right.
1559 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1560 goto error0;
1561 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1562 if ((error = xfs_btree_delete(cnt_cur, &i)))
1563 goto error0;
1564 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1566 * Delete the old by-block entry for the right block.
1568 if ((error = xfs_btree_delete(bno_cur, &i)))
1569 goto error0;
1570 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1572 * Move the by-block cursor back to the left neighbor.
1574 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1575 goto error0;
1576 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1577 #ifdef DEBUG
1579 * Check that this is the right record: delete didn't
1580 * mangle the cursor.
1583 xfs_agblock_t xxbno;
1584 xfs_extlen_t xxlen;
1586 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1587 &i)))
1588 goto error0;
1589 XFS_WANT_CORRUPTED_GOTO(
1590 i == 1 && xxbno == ltbno && xxlen == ltlen,
1591 error0);
1593 #endif
1595 * Update remaining by-block entry to the new, joined block.
1597 nbno = ltbno;
1598 nlen = len + ltlen + gtlen;
1599 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1600 goto error0;
1603 * Have only a left contiguous neighbor.
1604 * Merge it together with the new freespace.
1606 else if (haveleft) {
1608 * Delete the old by-size entry on the left.
1610 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1611 goto error0;
1612 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1613 if ((error = xfs_btree_delete(cnt_cur, &i)))
1614 goto error0;
1615 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1617 * Back up the by-block cursor to the left neighbor, and
1618 * update its length.
1620 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1621 goto error0;
1622 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1623 nbno = ltbno;
1624 nlen = len + ltlen;
1625 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1626 goto error0;
1629 * Have only a right contiguous neighbor.
1630 * Merge it together with the new freespace.
1632 else if (haveright) {
1634 * Delete the old by-size entry on the right.
1636 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1637 goto error0;
1638 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1639 if ((error = xfs_btree_delete(cnt_cur, &i)))
1640 goto error0;
1641 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1643 * Update the starting block and length of the right
1644 * neighbor in the by-block tree.
1646 nbno = bno;
1647 nlen = len + gtlen;
1648 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1649 goto error0;
1652 * No contiguous neighbors.
1653 * Insert the new freespace into the by-block tree.
1655 else {
1656 nbno = bno;
1657 nlen = len;
1658 if ((error = xfs_btree_insert(bno_cur, &i)))
1659 goto error0;
1660 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1662 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1663 bno_cur = NULL;
1665 * In all cases we need to insert the new freespace in the by-size tree.
1667 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1668 goto error0;
1669 XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
1670 if ((error = xfs_btree_insert(cnt_cur, &i)))
1671 goto error0;
1672 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1673 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1674 cnt_cur = NULL;
1677 * Update the freespace totals in the ag and superblock.
1679 pag = xfs_perag_get(mp, agno);
1680 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1681 xfs_perag_put(pag);
1682 if (error)
1683 goto error0;
1685 if (!isfl)
1686 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1687 XFS_STATS_INC(xs_freex);
1688 XFS_STATS_ADD(xs_freeb, len);
1690 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1692 return 0;
1694 error0:
1695 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1696 if (bno_cur)
1697 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1698 if (cnt_cur)
1699 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1700 return error;
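/*
 * Illustrative example (arbitrary values): freeing [bno = 60, len = 10]
 * when [50, 10] and [70, 20] are already free merges all three into a
 * single record [50, 40]: both old by-size entries are deleted, the
 * right by-block entry is deleted, the left by-block entry is rewritten
 * as [50, 40], and one by-size entry for the joined block is inserted.
 */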
1704 * Visible (exported) allocation/free functions.
1705 * Some of these are used just by xfs_alloc_btree.c and this file.
1709 * Compute and fill in value of m_ag_maxlevels.
1711 void
1712 xfs_alloc_compute_maxlevels(
1713 xfs_mount_t *mp) /* file system mount structure */
1715 int level;
1716 uint maxblocks;
1717 uint maxleafents;
1718 int minleafrecs;
1719 int minnoderecs;
1721 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1722 minleafrecs = mp->m_alloc_mnr[0];
1723 minnoderecs = mp->m_alloc_mnr[1];
1724 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1725 for (level = 1; maxblocks > 1; level++)
1726 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1727 mp->m_ag_maxlevels = level;
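/*
 * Illustrative example (values approximate, dependent on block size):
 * with sb_agblocks = 1048576 there can be at most 524288 free-space
 * records per AG; assuming roughly 255 records per half-full leaf and
 * 170 per half-full node at a 4k block size, that is 2057 leaves, 13
 * level-1 nodes and a single root, giving m_ag_maxlevels = 3.
 */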
1731 * Find the length of the longest extent in an AG.
1733 xfs_extlen_t
1734 xfs_alloc_longest_free_extent(
1735 struct xfs_mount *mp,
1736 struct xfs_perag *pag)
1738 xfs_extlen_t need, delta = 0;
1740 need = XFS_MIN_FREELIST_PAG(pag, mp);
1741 if (need > pag->pagf_flcount)
1742 delta = need - pag->pagf_flcount;
1744 if (pag->pagf_longest > delta)
1745 return pag->pagf_longest - delta;
1746 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
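/*
 * Illustrative example (arbitrary values): if the freelist wants 8
 * blocks but pagf_flcount is only 3, delta = 5 and a pagf_longest of 100
 * is reported as 95, reserving room to refill the freelist.  When the
 * longest extent is no bigger than delta, the return value degenerates
 * to 1 if any free space exists at all, else 0.
 */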
1750 * Decide whether to use this allocation group for this allocation.
1751 * If so, fix up the btree freelist's size.
1753 STATIC int /* error */
1754 xfs_alloc_fix_freelist(
1755 xfs_alloc_arg_t *args, /* allocation argument structure */
1756 int flags) /* XFS_ALLOC_FLAG_... */
1758 xfs_buf_t *agbp; /* agf buffer pointer */
1759 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1760 xfs_buf_t *agflbp;/* agfl buffer pointer */
1761 xfs_agblock_t bno; /* freelist block */
1762 xfs_extlen_t delta; /* new blocks needed in freelist */
1763 int error; /* error result code */
1764 xfs_extlen_t longest;/* longest extent in allocation group */
1765 xfs_mount_t *mp; /* file system mount point structure */
1766 xfs_extlen_t need; /* total blocks needed in freelist */
1767 xfs_perag_t *pag; /* per-ag information structure */
1768 xfs_alloc_arg_t targs; /* local allocation arguments */
1769 xfs_trans_t *tp; /* transaction pointer */
1771 mp = args->mp;
1773 pag = args->pag;
1774 tp = args->tp;
1775 if (!pag->pagf_init) {
1776 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1777 &agbp)))
1778 return error;
1779 if (!pag->pagf_init) {
1780 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1781 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1782 args->agbp = NULL;
1783 return 0;
1785 } else
1786 agbp = NULL;
1789 * If this is a metadata preferred pag and we are user data
1790 * then try somewhere else if we are not being asked to
1791 * try harder at this point
1793 if (pag->pagf_metadata && args->userdata &&
1794 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1795 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1796 args->agbp = NULL;
1797 return 0;
1800 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1802 * If it looks like there isn't a long enough extent, or enough
1803 * total blocks, reject it.
1805 need = XFS_MIN_FREELIST_PAG(pag, mp);
1806 longest = xfs_alloc_longest_free_extent(mp, pag);
1807 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1808 longest ||
1809 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1810 need - args->total) < (int)args->minleft)) {
1811 if (agbp)
1812 xfs_trans_brelse(tp, agbp);
1813 args->agbp = NULL;
1814 return 0;
1819 * Get the a.g. freespace buffer.
1820 * Can fail if we're not blocking on locks, and it's held.
1822 if (agbp == NULL) {
1823 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1824 &agbp)))
1825 return error;
1826 if (agbp == NULL) {
1827 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1828 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1829 args->agbp = NULL;
1830 return 0;
1834 * Figure out how many blocks we should have in the freelist.
1836 agf = XFS_BUF_TO_AGF(agbp);
1837 need = XFS_MIN_FREELIST(agf, mp);
1839 * If there isn't enough total or single-extent, reject it.
1841 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1842 delta = need > be32_to_cpu(agf->agf_flcount) ?
1843 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1844 longest = be32_to_cpu(agf->agf_longest);
1845 longest = (longest > delta) ? (longest - delta) :
1846 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1847 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1848 longest ||
1849 ((int)(be32_to_cpu(agf->agf_freeblks) +
1850 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1851 (int)args->minleft)) {
1852 xfs_trans_brelse(tp, agbp);
1853 args->agbp = NULL;
1854 return 0;
1858 * Make the freelist shorter if it's too long.
1860 while (be32_to_cpu(agf->agf_flcount) > need) {
1861 xfs_buf_t *bp;
1863 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1864 if (error)
1865 return error;
1866 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1867 return error;
1868 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1869 xfs_trans_binval(tp, bp);
1872 * Initialize the args structure.
1874 targs.tp = tp;
1875 targs.mp = mp;
1876 targs.agbp = agbp;
1877 targs.agno = args->agno;
1878 targs.mod = targs.minleft = targs.wasdel = targs.userdata =
1879 targs.minalignslop = 0;
1880 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1881 targs.type = XFS_ALLOCTYPE_THIS_AG;
1882 targs.pag = pag;
1883 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1884 return error;
1886 * Make the freelist longer if it's too short.
1888 while (be32_to_cpu(agf->agf_flcount) < need) {
1889 targs.agbno = 0;
1890 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1892 * Allocate as many blocks as possible at once.
1894 if ((error = xfs_alloc_ag_vextent(&targs))) {
1895 xfs_trans_brelse(tp, agflbp);
1896 return error;
1899 * Stop if we run out. Won't happen if callers are obeying
1900 * the restrictions correctly. Can happen for free calls
1901 * on a completely full ag.
1903 if (targs.agbno == NULLAGBLOCK) {
1904 if (flags & XFS_ALLOC_FLAG_FREEING)
1905 break;
1906 xfs_trans_brelse(tp, agflbp);
1907 args->agbp = NULL;
1908 return 0;
1911 * Put each allocated block on the list.
1913 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
1914 error = xfs_alloc_put_freelist(tp, agbp,
1915 agflbp, bno, 0);
1916 if (error)
1917 return error;
1920 xfs_trans_brelse(tp, agflbp);
1921 args->agbp = agbp;
1922 return 0;
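/*
 * Note on the freelist sizing above: "need" is derived from the current
 * heights of the two free-space btrees, so that a split at every level
 * of both trees can always be fed from the AGFL.  For example, if the
 * AGFL holds 9 blocks but only 6 are needed, three are pulled off and
 * freed back with isfl == 1 (no superblock accounting); if it holds 2,
 * four more are allocated with XFS_ALLOCTYPE_THIS_AG and put on the list.
 */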
1926 * Get a block from the freelist.
1927 * Returns with the buffer for the block gotten.
1929 int /* error */
1930 xfs_alloc_get_freelist(
1931 xfs_trans_t *tp, /* transaction pointer */
1932 xfs_buf_t *agbp, /* buffer containing the agf structure */
1933 xfs_agblock_t *bnop, /* block address retrieved from freelist */
1934 int btreeblk) /* destination is a AGF btree */
1936 xfs_agf_t *agf; /* a.g. freespace structure */
1937 xfs_agfl_t *agfl; /* a.g. freelist structure */
1938 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
1939 xfs_agblock_t bno; /* block number returned */
1940 int error;
1941 int logflags;
1942 xfs_mount_t *mp; /* mount structure */
1943 xfs_perag_t *pag; /* per allocation group data */
1945 agf = XFS_BUF_TO_AGF(agbp);
1947 * Freelist is empty, give up.
1949 if (!agf->agf_flcount) {
1950 *bnop = NULLAGBLOCK;
1951 return 0;
1954 * Read the array of free blocks.
1956 mp = tp->t_mountp;
1957 if ((error = xfs_alloc_read_agfl(mp, tp,
1958 be32_to_cpu(agf->agf_seqno), &agflbp)))
1959 return error;
1960 agfl = XFS_BUF_TO_AGFL(agflbp);
1962 * Get the block number and update the data structures.
1964 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1965 be32_add_cpu(&agf->agf_flfirst, 1);
1966 xfs_trans_brelse(tp, agflbp);
1967 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1968 agf->agf_flfirst = 0;
1970 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1971 be32_add_cpu(&agf->agf_flcount, -1);
1972 xfs_trans_agflist_delta(tp, -1);
1973 pag->pagf_flcount--;
1974 xfs_perag_put(pag);
1976 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
1977 if (btreeblk) {
1978 be32_add_cpu(&agf->agf_btreeblks, 1);
1979 pag->pagf_btreeblks++;
1980 logflags |= XFS_AGF_BTREEBLKS;
1983 xfs_alloc_log_agf(tp, agbp, logflags);
1984 *bnop = bno;
1986 return 0;
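/*
 * The AGFL behaves as a ring buffer: agf_flfirst indexes the next block
 * to hand out and wraps back to zero once it reaches XFS_AGFL_SIZE(mp).
 * Blocks taken for btree use (btreeblk != 0) are additionally counted
 * in agf_btreeblks and pag->pagf_btreeblks.
 */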
1990 * Log the given fields from the agf structure.
1992 void
1993 xfs_alloc_log_agf(
1994 xfs_trans_t *tp, /* transaction pointer */
1995 xfs_buf_t *bp, /* buffer for a.g. freelist header */
1996 int fields) /* mask of fields to be logged (XFS_AGF_...) */
1998 int first; /* first byte offset */
1999 int last; /* last byte offset */
2000 static const short offsets[] = {
2001 offsetof(xfs_agf_t, agf_magicnum),
2002 offsetof(xfs_agf_t, agf_versionnum),
2003 offsetof(xfs_agf_t, agf_seqno),
2004 offsetof(xfs_agf_t, agf_length),
2005 offsetof(xfs_agf_t, agf_roots[0]),
2006 offsetof(xfs_agf_t, agf_levels[0]),
2007 offsetof(xfs_agf_t, agf_flfirst),
2008 offsetof(xfs_agf_t, agf_fllast),
2009 offsetof(xfs_agf_t, agf_flcount),
2010 offsetof(xfs_agf_t, agf_freeblks),
2011 offsetof(xfs_agf_t, agf_longest),
2012 offsetof(xfs_agf_t, agf_btreeblks),
2013 sizeof(xfs_agf_t)
2016 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2018 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2019 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2023 * Interface for inode allocation to force the pag data to be initialized.
2025 int /* error */
2026 xfs_alloc_pagf_init(
2027 xfs_mount_t *mp, /* file system mount structure */
2028 xfs_trans_t *tp, /* transaction pointer */
2029 xfs_agnumber_t agno, /* allocation group number */
2030 int flags) /* XFS_ALLOC_FLAGS_... */
2032 xfs_buf_t *bp;
2033 int error;
2035 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2036 return error;
2037 if (bp)
2038 xfs_trans_brelse(tp, bp);
2039 return 0;
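/*
 * The AGF buffer read here is released immediately; the call exists only
 * for its side effect of initialising the pag->pagf_* fields through
 * xfs_alloc_read_agf().  A NULL bp (trylock failure) is not an error.
 */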
2043 * Put the block on the freelist for the allocation group.
2045 int /* error */
2046 xfs_alloc_put_freelist(
2047 xfs_trans_t *tp, /* transaction pointer */
2048 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2049 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2050 xfs_agblock_t bno, /* block being freed */
2051 int btreeblk) /* block came from an AGF btree */
2053 xfs_agf_t *agf; /* a.g. freespace structure */
2054 xfs_agfl_t *agfl; /* a.g. free block array */
2055 __be32 *blockp;/* pointer to array entry */
2056 int error;
2057 int logflags;
2058 xfs_mount_t *mp; /* mount structure */
2059 xfs_perag_t *pag; /* per allocation group data */
2061 agf = XFS_BUF_TO_AGF(agbp);
2062 mp = tp->t_mountp;
2064 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2065 be32_to_cpu(agf->agf_seqno), &agflbp)))
2066 return error;
2067 agfl = XFS_BUF_TO_AGFL(agflbp);
2068 be32_add_cpu(&agf->agf_fllast, 1);
2069 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2070 agf->agf_fllast = 0;
2072 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2073 be32_add_cpu(&agf->agf_flcount, 1);
2074 xfs_trans_agflist_delta(tp, 1);
2075 pag->pagf_flcount++;
2077 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2078 if (btreeblk) {
2079 be32_add_cpu(&agf->agf_btreeblks, -1);
2080 pag->pagf_btreeblks--;
2081 logflags |= XFS_AGF_BTREEBLKS;
2083 xfs_perag_put(pag);
2085 xfs_alloc_log_agf(tp, agbp, logflags);
2087 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2088 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2089 *blockp = cpu_to_be32(bno);
2090 xfs_alloc_log_agf(tp, agbp, logflags);
2091 xfs_trans_log_buf(tp, agflbp,
2092 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2093 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2094 sizeof(xfs_agblock_t) - 1));
2095 return 0;
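/*
 * Like agf_flfirst on the get side, agf_fllast wraps back to zero at
 * XFS_AGFL_SIZE(mp).  Only the single modified AGFL slot is logged,
 * using byte offsets relative to the start of the AGFL buffer.
 */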
2099 * Read in the allocation group header (free/alloc section).
2101 int /* error */
2102 xfs_read_agf(
2103 struct xfs_mount *mp, /* mount point structure */
2104 struct xfs_trans *tp, /* transaction pointer */
2105 xfs_agnumber_t agno, /* allocation group number */
2106 int flags, /* XFS_BUF_ */
2107 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2109 struct xfs_agf *agf; /* ag freelist header */
2110 int agf_ok; /* set if agf is consistent */
2111 int error;
2113 ASSERT(agno != NULLAGNUMBER);
2114 error = xfs_trans_read_buf(
2115 mp, tp, mp->m_ddev_targp,
2116 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2117 XFS_FSS_TO_BB(mp, 1), flags, bpp);
2118 if (error)
2119 return error;
2120 if (!*bpp)
2121 return 0;
2123 ASSERT(!XFS_BUF_GETERROR(*bpp));
2124 agf = XFS_BUF_TO_AGF(*bpp);
2127 * Validate the magic number of the agf block.
2129 agf_ok =
2130 be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
2131 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2132 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2133 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2134 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2135 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
2136 be32_to_cpu(agf->agf_seqno) == agno;
2137 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
2138 agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
2139 be32_to_cpu(agf->agf_length);
2140 if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
2141 XFS_RANDOM_ALLOC_READ_AGF))) {
2142 XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
2143 XFS_ERRLEVEL_LOW, mp, agf);
2144 xfs_trans_brelse(tp, *bpp);
2145 return XFS_ERROR(EFSCORRUPTED);
2147 XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
2148 return 0;
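/*
 * agf_ok collects the basic sanity checks on the header just read:
 * magic number, version, freelist indices and counts within the AGFL
 * size, and a sequence number matching the requested AG.  XFS_TEST_ERROR
 * additionally lets error injection exercise the corruption path.
 */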
2152 * Read in the allocation group header (free/alloc section).
2154 int /* error */
2155 xfs_alloc_read_agf(
2156 struct xfs_mount *mp, /* mount point structure */
2157 struct xfs_trans *tp, /* transaction pointer */
2158 xfs_agnumber_t agno, /* allocation group number */
2159 int flags, /* XFS_ALLOC_FLAG_... */
2160 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2162 struct xfs_agf *agf; /* ag freelist header */
2163 struct xfs_perag *pag; /* per allocation group data */
2164 int error;
2166 ASSERT(agno != NULLAGNUMBER);
2168 error = xfs_read_agf(mp, tp, agno,
2169 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2170 bpp);
2171 if (error)
2172 return error;
2173 if (!*bpp)
2174 return 0;
2175 ASSERT(!XFS_BUF_GETERROR(*bpp));
2177 agf = XFS_BUF_TO_AGF(*bpp);
2178 pag = xfs_perag_get(mp, agno);
2179 if (!pag->pagf_init) {
2180 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2181 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2182 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2183 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2184 pag->pagf_levels[XFS_BTNUM_BNOi] =
2185 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2186 pag->pagf_levels[XFS_BTNUM_CNTi] =
2187 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2188 spin_lock_init(&pag->pagb_lock);
2189 pag->pagb_count = 0;
2190 pag->pagb_tree = RB_ROOT;
2191 pag->pagf_init = 1;
2193 #ifdef DEBUG
2194 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2195 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2196 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2197 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2198 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2199 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2200 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2201 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2202 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2204 #endif
2205 xfs_perag_put(pag);
2206 return 0;
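/*
 * The pagf_* fields cached above are only initialised on the first read
 * of the AGF (pagf_init); in DEBUG builds every subsequent read
 * re-checks the cached values against the on-disk header unless the
 * filesystem has been shut down.
 */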
2210 * Allocate an extent (variable-size).
2211 * Depending on the allocation type, we either look in a single allocation
2212 * group or loop over the allocation groups to find the result.
2214 int /* error */
2215 xfs_alloc_vextent(
2216 xfs_alloc_arg_t *args) /* allocation argument structure */
2218 xfs_agblock_t agsize; /* allocation group size */
2219 int error;
2220 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2221 xfs_extlen_t minleft;/* minimum left value, temp copy */
2222 xfs_mount_t *mp; /* mount structure pointer */
2223 xfs_agnumber_t sagno; /* starting allocation group number */
2224 xfs_alloctype_t type; /* input allocation type */
2225 int bump_rotor = 0;
2226 int no_min = 0;
2227 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2229 mp = args->mp;
2230 type = args->otype = args->type;
2231 args->agbno = NULLAGBLOCK;
2233 * Just fix this up, for the case where the last a.g. is shorter
2234 * (or there's only one a.g.) and the caller couldn't easily figure
2235 * that out (xfs_bmap_alloc).
2237 agsize = mp->m_sb.sb_agblocks;
2238 if (args->maxlen > agsize)
2239 args->maxlen = agsize;
2240 if (args->alignment == 0)
2241 args->alignment = 1;
2242 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2243 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2244 ASSERT(args->minlen <= args->maxlen);
2245 ASSERT(args->minlen <= agsize);
2246 ASSERT(args->mod < args->prod);
2247 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2248 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2249 args->minlen > args->maxlen || args->minlen > agsize ||
2250 args->mod >= args->prod) {
2251 args->fsbno = NULLFSBLOCK;
2252 trace_xfs_alloc_vextent_badargs(args);
2253 return 0;
2255 minleft = args->minleft;
2257 switch (type) {
2258 case XFS_ALLOCTYPE_THIS_AG:
2259 case XFS_ALLOCTYPE_NEAR_BNO:
2260 case XFS_ALLOCTYPE_THIS_BNO:
2262 * These three force us into a single a.g.
2264 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2265 args->pag = xfs_perag_get(mp, args->agno);
2266 args->minleft = 0;
2267 error = xfs_alloc_fix_freelist(args, 0);
2268 args->minleft = minleft;
2269 if (error) {
2270 trace_xfs_alloc_vextent_nofix(args);
2271 goto error0;
2273 if (!args->agbp) {
2274 trace_xfs_alloc_vextent_noagbp(args);
2275 break;
2277 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2278 if ((error = xfs_alloc_ag_vextent(args)))
2279 goto error0;
2280 break;
2281 case XFS_ALLOCTYPE_START_BNO:
2283 * Try near allocation first, then anywhere-in-ag after
2284 * the first a.g. fails.
2286 if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
2287 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2288 args->fsbno = XFS_AGB_TO_FSB(mp,
2289 ((mp->m_agfrotor / rotorstep) %
2290 mp->m_sb.sb_agcount), 0);
2291 bump_rotor = 1;
2293 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2294 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2295 /* FALLTHROUGH */
2296 case XFS_ALLOCTYPE_ANY_AG:
2297 case XFS_ALLOCTYPE_START_AG:
2298 case XFS_ALLOCTYPE_FIRST_AG:
2300 * Rotate through the allocation groups looking for a winner.
2302 if (type == XFS_ALLOCTYPE_ANY_AG) {
2304 * Start with the last place we left off.
2306 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2307 mp->m_sb.sb_agcount;
2308 args->type = XFS_ALLOCTYPE_THIS_AG;
2309 flags = XFS_ALLOC_FLAG_TRYLOCK;
2310 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2312 * Start with allocation group given by bno.
2314 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2315 args->type = XFS_ALLOCTYPE_THIS_AG;
2316 sagno = 0;
2317 flags = 0;
2318 } else {
2319 if (type == XFS_ALLOCTYPE_START_AG)
2320 args->type = XFS_ALLOCTYPE_THIS_AG;
2322 * Start with the given allocation group.
2324 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2325 flags = XFS_ALLOC_FLAG_TRYLOCK;
2328 * Loop over allocation groups twice; first time with
2329 * trylock set, second time without.
2331 for (;;) {
2332 args->pag = xfs_perag_get(mp, args->agno);
2333 if (no_min) args->minleft = 0;
2334 error = xfs_alloc_fix_freelist(args, flags);
2335 args->minleft = minleft;
2336 if (error) {
2337 trace_xfs_alloc_vextent_nofix(args);
2338 goto error0;
2341 * If we get a buffer back then the allocation will fly.
2343 if (args->agbp) {
2344 if ((error = xfs_alloc_ag_vextent(args)))
2345 goto error0;
2346 break;
2349 trace_xfs_alloc_vextent_loopfailed(args);
2352 * Didn't work, figure out the next iteration.
2354 if (args->agno == sagno &&
2355 type == XFS_ALLOCTYPE_START_BNO)
2356 args->type = XFS_ALLOCTYPE_THIS_AG;
2358 * For the first allocation, we can try any AG to get
2359 * space. However, if we already have allocated a
2360 * block, we don't want to try AGs whose number is below
2361 * sagno. Otherwise, we may end up with out-of-order
2362 * locking of AGF, which might cause deadlock.
2364 if (++(args->agno) == mp->m_sb.sb_agcount) {
2365 if (args->firstblock != NULLFSBLOCK)
2366 args->agno = sagno;
2367 else
2368 args->agno = 0;
2371 * Reached the starting a.g., must either be done
2372 * or switch to non-trylock mode.
2374 if (args->agno == sagno) {
2375 if (no_min == 1) {
2376 args->agbno = NULLAGBLOCK;
2377 trace_xfs_alloc_vextent_allfailed(args);
2378 break;
2380 if (flags == 0) {
2381 no_min = 1;
2382 } else {
2383 flags = 0;
2384 if (type == XFS_ALLOCTYPE_START_BNO) {
2385 args->agbno = XFS_FSB_TO_AGBNO(mp,
2386 args->fsbno);
2387 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2391 xfs_perag_put(args->pag);
2393 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2394 if (args->agno == sagno)
2395 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2396 (mp->m_sb.sb_agcount * rotorstep);
2397 else
2398 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2399 (mp->m_sb.sb_agcount * rotorstep);
2401 break;
2402 default:
2403 ASSERT(0);
2404 /* NOTREACHED */
2406 if (args->agbno == NULLAGBLOCK)
2407 args->fsbno = NULLFSBLOCK;
2408 else {
2409 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2410 #ifdef DEBUG
2411 ASSERT(args->len >= args->minlen);
2412 ASSERT(args->len <= args->maxlen);
2413 ASSERT(args->agbno % args->alignment == 0);
2414 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2415 args->len);
2416 #endif
2418 xfs_perag_put(args->pag);
2419 return 0;
2420 error0:
2421 xfs_perag_put(args->pag);
2422 return error;
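/*
 * Illustrative caller sketch (not taken from this file; the values are
 * only assumptions for the example): a simple single-block near-bno
 * allocation would set up the argument structure roughly as follows,
 * given a transaction tp, mount mp and a target block target_fsbno:
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.fsbno = target_fsbno;
 *	args.type = XFS_ALLOCTYPE_NEAR_BNO;
 *	args.minlen = args.maxlen = args.prod = 1;
 *	args.alignment = 1;
 *	error = xfs_alloc_vextent(&args);
 *
 * On return args.fsbno is NULLFSBLOCK if nothing suitable was found,
 * otherwise it holds the start of the allocated extent and args.len
 * its length.
 */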
2426 * Free an extent.
2427 * Just break up the extent address and hand off to xfs_free_ag_extent
2428 * after fixing up the freelist.
2430 int /* error */
2431 xfs_free_extent(
2432 xfs_trans_t *tp, /* transaction pointer */
2433 xfs_fsblock_t bno, /* starting block number of extent */
2434 xfs_extlen_t len) /* length of extent */
2436 xfs_alloc_arg_t args;
2437 int error;
2439 ASSERT(len != 0);
2440 memset(&args, 0, sizeof(xfs_alloc_arg_t));
2441 args.tp = tp;
2442 args.mp = tp->t_mountp;
2445 * Validate that the block number is legal - this enables us to detect
2446 * and handle silent filesystem corruption rather than crashing.
2448 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2449 if (args.agno >= args.mp->m_sb.sb_agcount)
2450 return EFSCORRUPTED;
2452 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2453 if (args.agbno >= args.mp->m_sb.sb_agblocks)
2454 return EFSCORRUPTED;
2456 args.pag = xfs_perag_get(args.mp, args.agno);
2457 ASSERT(args.pag);
2459 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2460 if (error)
2461 goto error0;
2463 /* validate the extent size is legal now we have the agf locked */
2464 if (args.agbno + len >
2465 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2466 error = EFSCORRUPTED;
2467 goto error0;
2470 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2471 if (!error)
2472 xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
2473 error0:
2474 xfs_perag_put(args.pag);
2475 return error;
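/*
 * The freed range is also recorded in the per-AG busy extent tree so
 * that it will not be handed out again and overwritten before the
 * transaction doing the free has committed to the log.
 */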
2478 void
2479 xfs_alloc_busy_insert(
2480 struct xfs_trans *tp,
2481 xfs_agnumber_t agno,
2482 xfs_agblock_t bno,
2483 xfs_extlen_t len,
2484 unsigned int flags)
2486 struct xfs_busy_extent *new;
2487 struct xfs_busy_extent *busyp;
2488 struct xfs_perag *pag;
2489 struct rb_node **rbp;
2490 struct rb_node *parent = NULL;
2492 new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
2493 if (!new) {
2495 * No Memory! Since it is now not possible to track the free
2496 * block, make this a synchronous transaction to ensure that
2497 * the block is not reused before this transaction commits.
2499 trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
2500 xfs_trans_set_sync(tp);
2501 return;
2504 new->agno = agno;
2505 new->bno = bno;
2506 new->length = len;
2507 INIT_LIST_HEAD(&new->list);
2508 new->flags = flags;
2510 /* trace before insert to be able to see failed inserts */
2511 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
2513 pag = xfs_perag_get(tp->t_mountp, new->agno);
2514 spin_lock(&pag->pagb_lock);
2515 rbp = &pag->pagb_tree.rb_node;
2516 while (*rbp) {
2517 parent = *rbp;
2518 busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
2520 if (new->bno < busyp->bno) {
2521 rbp = &(*rbp)->rb_left;
2522 ASSERT(new->bno + new->length <= busyp->bno);
2523 } else if (new->bno > busyp->bno) {
2524 rbp = &(*rbp)->rb_right;
2525 ASSERT(bno >= busyp->bno + busyp->length);
2526 } else {
2527 ASSERT(0);
2531 rb_link_node(&new->rb_node, parent, rbp);
2532 rb_insert_color(&new->rb_node, &pag->pagb_tree);
2534 list_add(&new->list, &tp->t_busy);
2535 spin_unlock(&pag->pagb_lock);
2536 xfs_perag_put(pag);
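/*
 * The busy tree is an rbtree keyed on the extent's start block.  Busy
 * extents are expected never to overlap, which the ASSERTs above check
 * while descending, and an exactly equal start block is treated as a bug.
 */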
2540 * Search for a busy extent within the range of the extent we are about to
2541 * allocate. xfs_alloc_busy_search() takes the busy extent tree lock itself,
2542 * so the caller must not already hold it. This function returns 0 for no
2543 * overlapping busy extent, -1 for an overlapping but not exact busy extent,
2544 * and 1 for an exact match. This is done so that a non-zero return
2545 * indicates an overlap that will require a synchronous transaction, while
2546 * still distinguishing between a partial and an exact match.
2549 xfs_alloc_busy_search(
2550 struct xfs_mount *mp,
2551 xfs_agnumber_t agno,
2552 xfs_agblock_t bno,
2553 xfs_extlen_t len)
2555 struct xfs_perag *pag;
2556 struct rb_node *rbp;
2557 struct xfs_busy_extent *busyp;
2558 int match = 0;
2560 pag = xfs_perag_get(mp, agno);
2561 spin_lock(&pag->pagb_lock);
2563 rbp = pag->pagb_tree.rb_node;
2565 /* find closest start bno overlap */
2566 while (rbp) {
2567 busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
2568 if (bno < busyp->bno) {
2569 /* may overlap, but exact start block is lower */
2570 if (bno + len > busyp->bno)
2571 match = -1;
2572 rbp = rbp->rb_left;
2573 } else if (bno > busyp->bno) {
2574 /* may overlap, but exact start block is higher */
2575 if (bno < busyp->bno + busyp->length)
2576 match = -1;
2577 rbp = rbp->rb_right;
2578 } else {
2579 /* bno matches busyp, length determines exact match */
2580 match = (busyp->length == len) ? 1 : -1;
2581 break;
2584 spin_unlock(&pag->pagb_lock);
2585 xfs_perag_put(pag);
2586 return match;
2590 * The found free extent [fbno, fend] overlaps part or all of the given busy
2591 * extent. If the overlap covers the beginning, the end, or all of the busy
2592 * extent, the overlapping portion can be made unbusy and used for the
2593 * allocation. We can't split a busy extent because we can't modify a
2594 * transaction/CIL context busy list, but we can update an entry's block
2595 * number or length.
2597 * Returns true if the extent can safely be reused, or false if the search
2598 * needs to be restarted.
2600 STATIC bool
2601 xfs_alloc_busy_update_extent(
2602 struct xfs_mount *mp,
2603 struct xfs_perag *pag,
2604 struct xfs_busy_extent *busyp,
2605 xfs_agblock_t fbno,
2606 xfs_extlen_t flen,
2607 bool userdata)
2609 xfs_agblock_t fend = fbno + flen;
2610 xfs_agblock_t bbno = busyp->bno;
2611 xfs_agblock_t bend = bbno + busyp->length;
2614 * This extent is currently being discarded. Give the thread
2615 * performing the discard a chance to mark the extent unbusy
2616 * and retry.
2618 if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
2619 spin_unlock(&pag->pagb_lock);
2620 delay(1);
2621 spin_lock(&pag->pagb_lock);
2622 return false;
2626 * If there is a busy extent overlapping a user allocation, we have
2627 * no choice but to force the log and retry the search.
2629 * Fortunately this does not happen during normal operation, but
2630 * only if the filesystem is very low on space and has to dip into
2631 * the AGFL for normal allocations.
2633 if (userdata)
2634 goto out_force_log;
2636 if (bbno < fbno && bend > fend) {
2638 * Case 1:
2639 * bbno bend
2640 * +BBBBBBBBBBBBBBBBB+
2641 * +---------+
2642 * fbno fend
2646 * We would have to split the busy extent to be able to track
2647 * it correctly, which we cannot do because we would have to
2648 * modify the list of busy extents attached to the transaction
2649 * or CIL context, which is immutable.
2651 * Force out the log to clear the busy extent and retry the
2652 * search.
2654 goto out_force_log;
2655 } else if (bbno >= fbno && bend <= fend) {
2657 * Case 2:
2658 * bbno bend
2659 * +BBBBBBBBBBBBBBBBB+
2660 * +-----------------+
2661 * fbno fend
2663 * Case 3:
2664 * bbno bend
2665 * +BBBBBBBBBBBBBBBBB+
2666 * +--------------------------+
2667 * fbno fend
2669 * Case 4:
2670 * bbno bend
2671 * +BBBBBBBBBBBBBBBBB+
2672 * +--------------------------+
2673 * fbno fend
2675 * Case 5:
2676 * bbno bend
2677 * +BBBBBBBBBBBBBBBBB+
2678 * +-----------------------------------+
2679 * fbno fend
2684 * The busy extent is fully covered by the extent we are
2685 * allocating, and can simply be removed from the rbtree.
2686 * However we cannot remove it from the immutable list
2687 * tracking busy extents in the transaction or CIL context,
2688 * so set the length to zero to mark it invalid.
2690 * We also need to restart the busy extent search from the
2691 * tree root, because erasing the node can rearrange the
2692 * tree topology.
2694 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2695 busyp->length = 0;
2696 return false;
2697 } else if (fend < bend) {
2699 * Case 6:
2700 * bbno bend
2701 * +BBBBBBBBBBBBBBBBB+
2702 * +---------+
2703 * fbno fend
2705 * Case 7:
2706 * bbno bend
2707 * +BBBBBBBBBBBBBBBBB+
2708 * +------------------+
2709 * fbno fend
2712 busyp->bno = fend;
2713 } else if (bbno < fbno) {
2715 * Case 8:
2716 * bbno bend
2717 * +BBBBBBBBBBBBBBBBB+
2718 * +-------------+
2719 * fbno fend
2721 * Case 9:
2722 * bbno bend
2723 * +BBBBBBBBBBBBBBBBB+
2724 * +----------------------+
2725 * fbno fend
2727 busyp->length = fbno - busyp->bno;
2728 } else {
2729 ASSERT(0);
2732 trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
2733 return true;
2735 out_force_log:
2736 spin_unlock(&pag->pagb_lock);
2737 xfs_log_force(mp, XFS_LOG_SYNC);
2738 trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
2739 spin_lock(&pag->pagb_lock);
2740 return false;
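/*
 * Summary of the cases above: if the busy extent lies entirely within
 * the range being reused it is erased from the tree and the search is
 * restarted; if only its front or back overlaps, the busy extent is
 * shrunk in place; a userdata allocation or a busy extent that sticks
 * out on both sides forces the log instead, and an extent currently
 * being discarded just waits briefly and retries.
 */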
2745 * For a given extent [fbno, flen], make sure we can reuse it safely.
2747 void
2748 xfs_alloc_busy_reuse(
2749 struct xfs_mount *mp,
2750 xfs_agnumber_t agno,
2751 xfs_agblock_t fbno,
2752 xfs_extlen_t flen,
2753 bool userdata)
2755 struct xfs_perag *pag;
2756 struct rb_node *rbp;
2758 ASSERT(flen > 0);
2760 pag = xfs_perag_get(mp, agno);
2761 spin_lock(&pag->pagb_lock);
2762 restart:
2763 rbp = pag->pagb_tree.rb_node;
2764 while (rbp) {
2765 struct xfs_busy_extent *busyp =
2766 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2767 xfs_agblock_t bbno = busyp->bno;
2768 xfs_agblock_t bend = bbno + busyp->length;
2770 if (fbno + flen <= bbno) {
2771 rbp = rbp->rb_left;
2772 continue;
2773 } else if (fbno >= bend) {
2774 rbp = rbp->rb_right;
2775 continue;
2778 if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
2779 userdata))
2780 goto restart;
2782 spin_unlock(&pag->pagb_lock);
2783 xfs_perag_put(pag);
2787 * For a given extent [fbno, flen], search the busy extent list to find a
2788 * subset of the extent that is not busy. If *rlen is smaller than
2789 * args->minlen no suitable extent could be found, and the higher level
2790 * code needs to force out the log and retry the allocation.
2792 STATIC void
2793 xfs_alloc_busy_trim(
2794 struct xfs_alloc_arg *args,
2795 xfs_agblock_t bno,
2796 xfs_extlen_t len,
2797 xfs_agblock_t *rbno,
2798 xfs_extlen_t *rlen)
2800 xfs_agblock_t fbno;
2801 xfs_extlen_t flen;
2802 struct rb_node *rbp;
2804 ASSERT(len > 0);
2806 spin_lock(&args->pag->pagb_lock);
2807 restart:
2808 fbno = bno;
2809 flen = len;
2810 rbp = args->pag->pagb_tree.rb_node;
2811 while (rbp && flen >= args->minlen) {
2812 struct xfs_busy_extent *busyp =
2813 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2814 xfs_agblock_t fend = fbno + flen;
2815 xfs_agblock_t bbno = busyp->bno;
2816 xfs_agblock_t bend = bbno + busyp->length;
2818 if (fend <= bbno) {
2819 rbp = rbp->rb_left;
2820 continue;
2821 } else if (fbno >= bend) {
2822 rbp = rbp->rb_right;
2823 continue;
2827 * If this is a metadata allocation, try to reuse the busy
2828 * extent instead of trimming the allocation.
2830 if (!args->userdata &&
2831 !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
2832 if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
2833 busyp, fbno, flen,
2834 false))
2835 goto restart;
2836 continue;
2839 if (bbno <= fbno) {
2840 /* start overlap */
2843 * Case 1:
2844 * bbno bend
2845 * +BBBBBBBBBBBBBBBBB+
2846 * +---------+
2847 * fbno fend
2849 * Case 2:
2850 * bbno bend
2851 * +BBBBBBBBBBBBBBBBB+
2852 * +-------------+
2853 * fbno fend
2855 * Case 3:
2856 * bbno bend
2857 * +BBBBBBBBBBBBBBBBB+
2858 * +-------------+
2859 * fbno fend
2861 * Case 4:
2862 * bbno bend
2863 * +BBBBBBBBBBBBBBBBB+
2864 * +-----------------+
2865 * fbno fend
2867 * No unbusy region in extent, return failure.
2869 if (fend <= bend)
2870 goto fail;
2873 * Case 5:
2874 * bbno bend
2875 * +BBBBBBBBBBBBBBBBB+
2876 * +----------------------+
2877 * fbno fend
2879 * Case 6:
2880 * bbno bend
2881 * +BBBBBBBBBBBBBBBBB+
2882 * +--------------------------+
2883 * fbno fend
2885 * Needs to be trimmed to:
2886 * +-------+
2887 * fbno fend
2889 fbno = bend;
2890 } else if (bend >= fend) {
2891 /* end overlap */
2894 * Case 7:
2895 * bbno bend
2896 * +BBBBBBBBBBBBBBBBB+
2897 * +------------------+
2898 * fbno fend
2900 * Case 8:
2901 * bbno bend
2902 * +BBBBBBBBBBBBBBBBB+
2903 * +--------------------------+
2904 * fbno fend
2906 * Needs to be trimmed to:
2907 * +-------+
2908 * fbno fend
2910 fend = bbno;
2911 } else {
2912 /* middle overlap */
2915 * Case 9:
2916 * bbno bend
2917 * +BBBBBBBBBBBBBBBBB+
2918 * +-----------------------------------+
2919 * fbno fend
2921 * Can be trimmed to:
2922 * +-------+ OR +-------+
2923 * fbno fend fbno fend
2925 * Backward allocation leads to significant
2926 * fragmentation of directories, which degrades
2927 * directory performance; therefore we always want to
2928 * choose the option that produces forward allocation
2929 * patterns.
2930 * Preferring the lower bno extent will make the next
2931 * request use "fend" as the start of the next
2932 * allocation; if the segment is no longer busy at
2933 * that point, we'll get a contiguous allocation, but
2934 * even if it is still busy, we will get a forward
2935 * allocation.
2936 * We try to avoid choosing the segment at "bend",
2937 * because that can lead to the next allocation
2938 * taking the segment at "fbno", which would be a
2939 * backward allocation. We only use the segment at
2940 * "fbno" if it is much larger than the current
2941 * requested size, because in that case there's a
2942 * good chance subsequent allocations will be
2943 * contiguous.
2945 if (bbno - fbno >= args->maxlen) {
2946 /* left candidate fits perfectly */
2947 fend = bbno;
2948 } else if (fend - bend >= args->maxlen * 4) {
2949 /* right candidate has enough free space */
2950 fbno = bend;
2951 } else if (bbno - fbno >= args->minlen) {
2952 /* left candidate fits minimum requirement */
2953 fend = bbno;
2954 } else {
2955 goto fail;
2959 flen = fend - fbno;
2961 spin_unlock(&args->pag->pagb_lock);
2963 if (fbno != bno || flen != len) {
2964 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
2965 fbno, flen);
2967 *rbno = fbno;
2968 *rlen = flen;
2969 return;
2970 fail:
2972 * Return a zero extent length as the failure indication. All callers
2973 * re-check whether the trimmed extent satisfies the minlen requirement.
2975 spin_unlock(&args->pag->pagb_lock);
2976 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
2977 *rbno = fbno;
2978 *rlen = 0;
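/*
 * Note that the loop above only ever shrinks [fbno, fend): overlaps at
 * the front move fbno forward, overlaps at the back pull fend in, and a
 * busy extent in the middle forces a choice between the two remaining
 * segments, preferring the one that keeps allocations moving forward.
 */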
2981 static void
2982 xfs_alloc_busy_clear_one(
2983 struct xfs_mount *mp,
2984 struct xfs_perag *pag,
2985 struct xfs_busy_extent *busyp)
2987 if (busyp->length) {
2988 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
2989 busyp->length);
2990 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2993 list_del_init(&busyp->list);
2994 kmem_free(busyp);
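/*
 * A zero length marks a node that xfs_alloc_busy_update_extent() already
 * erased from the rbtree; in that case only the transaction's list entry
 * is removed before the memory is freed.
 */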
2998 * Remove all extents on the passed-in list from the busy extents tree.
2999 * If do_discard is set, skip extents that need to be discarded and mark
3000 * these as undergoing a discard operation instead.
3002 void
3003 xfs_alloc_busy_clear(
3004 struct xfs_mount *mp,
3005 struct list_head *list,
3006 bool do_discard)
3008 struct xfs_busy_extent *busyp, *n;
3009 struct xfs_perag *pag = NULL;
3010 xfs_agnumber_t agno = NULLAGNUMBER;
3012 list_for_each_entry_safe(busyp, n, list, list) {
3013 if (busyp->agno != agno) {
3014 if (pag) {
3015 spin_unlock(&pag->pagb_lock);
3016 xfs_perag_put(pag);
3018 pag = xfs_perag_get(mp, busyp->agno);
3019 spin_lock(&pag->pagb_lock);
3020 agno = busyp->agno;
3023 if (do_discard && busyp->length &&
3024 !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
3025 busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
3026 else
3027 xfs_alloc_busy_clear_one(mp, pag, busyp);
3030 if (pag) {
3031 spin_unlock(&pag->pagb_lock);
3032 xfs_perag_put(pag);
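/*
 * pagb_lock is taken once per run of consecutive entries in the same AG,
 * so callers benefit from handing in a list already grouped by AG; the
 * list_sort() callback below exists for exactly that purpose.
 */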
3037 * Callback for list_sort to sort busy extents by the AG they reside in.
3040 xfs_busy_extent_ag_cmp(
3041 void *priv,
3042 struct list_head *a,
3043 struct list_head *b)
3045 return container_of(a, struct xfs_busy_extent, list)->agno -
3046 container_of(b, struct xfs_busy_extent, list)->agno;