/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

/*
 * Prototypes for per-ag allocation routines
 */

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
	xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
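
/*
 * Note on the three lookup helpers above: each stashes the [bno, len]
 * search key in cur->bc_rec.a and defers to xfs_btree_lookup().  The
 * same key works for both trees because the by-block (bno) btree is
 * ordered on ar_startblock while the by-size (cnt) btree is ordered on
 * (ar_blockcount, ar_startblock).
 */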

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
STATIC int				/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC void
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen)	/* result length */
{
	xfs_agblock_t	bno;
	xfs_extlen_t	diff;
	xfs_extlen_t	len;

	if (args->alignment > 1 && foundlen >= args->minlen) {
		bno = roundup(foundbno, args->alignment);
		diff = bno - foundbno;
		len = diff >= foundlen ? 0 : foundlen - diff;
	} else {
		bno = foundbno;
		len = foundlen;
	}
	*resbno = bno;
	*reslen = len;
}

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
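
/*
 * Worked example for xfs_alloc_compute_diff(): wantbno = 100,
 * wantlen = 8, alignment = 1, freespace [freebno = 90, freelen = 40].
 * Here freebno < wantbno and freeend (130) >= wantend (108), so
 * newbno1 = wantbno = 100 and the returned diff is 0: an unaligned
 * request that fits inside the freespace starts exactly where the
 * caller wanted it.
 */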

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}
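
/*
 * Worked example for xfs_alloc_fix_len(): with prod = 4, mod = 0 and
 * args->len = 11, k = 11 % 4 = 3, so the length is trimmed to
 * 11 - 3 = 8, the largest multiple of prod not exceeding the original
 * length.  If the trim would drop below minlen, args->len is left
 * unchanged.
 */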

/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_agf_t	*agf;		/* a.g. freelist header */
	int		diff;		/* free space difference */

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		+ be32_to_cpu(agf->agf_flcount)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;		/* shrink the allocated space */
	if (args->len >= args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	return 0;
}
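
/*
 * Illustrative numbers for xfs_alloc_fix_minleft(): with
 * agf_freeblks = 100, agf_flcount = 4, args->len = 90 and
 * args->minleft = 20, diff = 100 + 4 - 90 - 20 = -6, so the allocation
 * shrinks by 6 blocks to 84; it fails outright only if the shrunken
 * length drops below args->minlen.
 */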

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
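
/*
 * The four cases handled above, pictorially (f = original free extent,
 * r = returned allocation, n = new free extent(s)):
 *
 *	rbno == fbno && rlen == flen:	[rrrrrr]     -> no freespace left
 *	rbno == fbno:			[rrr|nnn]    -> one extent after r
 *	rbno + rlen == fbno + flen:	[nnn|rrr]    -> one extent before r
 *	otherwise:			[nn|rrr|nn]  -> extents on both sides
 */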

/*
 * Read in the allocation group free block array.
 */
STATIC int				/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	ASSERT(!XFS_BUF_GETERROR(bp));
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);

	pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length)))
		return EFSCORRUPTED;

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
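
/*
 * Note the sign convention for xfs_alloc_update_counters(): it is
 * called with a negative length when extents are allocated (see
 * xfs_alloc_ag_vextent) and a positive length when extents are freed
 * (see xfs_free_ag_extent), so one helper keeps the AGF and the per-ag
 * counters in step for both directions.
 */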

/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || !args->isfl);
	ASSERT(args->agbno % args->alignment == 0);

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->pag,
						  args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		/*
		 * Search the busylist for these blocks and mark the
		 * transaction as synchronous if blocks are found. This
		 * avoids the need to block due to a synchronous log
		 * force to ensure correct ordering as the synchronous
		 * transaction will guarantee that for us.
		 */
		if (xfs_alloc_busy_search(args->mp, args->agno,
					args->agbno, args->len))
			xfs_trans_set_sync(args->tp);
	}

	if (!args->isfl)
		xfs_trans_mod_sb(args->tp, args->wasdel ?
				 XFS_TRANS_SB_RES_FDBLOCKS :
				 XFS_TRANS_SB_FDBLOCKS,
				 -((long)(args->len)));

	XFS_STATS_INC(xs_allocx);
	XFS_STATS_ADD(xs_allocb, args->len);
	return error;
}
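
/*
 * A note on the superblock accounting above: delayed-allocation
 * extents (args->wasdel) were already deducted from the free-block
 * count when the delayed reservation was taken, so they are charged to
 * XFS_TRANS_SB_RES_FDBLOCKS rather than being double-counted against
 * XFS_TRANS_SB_FDBLOCKS.
 */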

/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;

	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;

	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}

/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,
	xfs_extlen_t		*slena,	/* aligned length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		bno;
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(args, *sbno, *slen, &bno, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (bno >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (bno <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment, *sbno,
						       *slen, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}

/*
 * Allocate a variable extent near bno in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_near(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
	xfs_agblock_t	gtbno;		/* start bno of right side entry */
	xfs_agblock_t	gtbnoa;		/* aligned ... */
	xfs_extlen_t	gtdiff;		/* difference to right side entry */
	xfs_extlen_t	gtlen;		/* length of right side entry */
	xfs_extlen_t	gtlena = 0;	/* aligned ... */
	xfs_agblock_t	gtnew;		/* useful start bno of right side */
	int		error;		/* error code */
	int		i;		/* result code, temporary */
	int		j;		/* result code, temporary */
	xfs_agblock_t	ltbno;		/* start bno of left side entry */
	xfs_agblock_t	ltbnoa;		/* aligned ... */
	xfs_extlen_t	ltdiff;		/* difference to left side entry */
	xfs_extlen_t	ltlen;		/* length of left side entry */
	xfs_extlen_t	ltlena = 0;	/* aligned ... */
	xfs_agblock_t	ltnew;		/* useful start bno of left side */
	xfs_extlen_t	rlen;		/* length of returned extent */
#if defined(DEBUG) && defined(__KERNEL__)
	/*
	 * Randomly don't execute the first algorithm.
	 */
	int		dofirst;	/* set to do first algorithm */

	dofirst = random32() & 1;
#endif
	/*
	 * Get a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ltlen = 0;
	bno_cur_lt = bno_cur_gt = NULL;
	/*
	 * See if there are any free extents as big as maxlen.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
				&ltlen, &i)))
			goto error0;
		if (i == 0 || ltlen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	args->wasfromfl = 0;
	/*
	 * First algorithm.
	 * If the requested extent is large wrt the freespaces available
	 * in this a.g., then the cursor will be pointing to a btree entry
	 * near the right edge of the tree.  If it's in the last btree leaf
	 * block, then we just examine all the entries in that block
	 * that are big enough, and pick the best one.
	 * This is written as a while loop so we can break out of it,
	 * but we never loop back to the top.
	 */
	while (xfs_btree_islastblock(cnt_cur, 0)) {
		xfs_extlen_t	bdiff;
		int		besti=0;
		xfs_extlen_t	blen=0;
		xfs_agblock_t	bnew=0;

#if defined(DEBUG) && defined(__KERNEL__)
		if (!dofirst)
			break;
#endif
		/*
		 * Start from the entry that lookup found, sequence through
		 * all larger free blocks.  If we're actually pointing at a
		 * record smaller than maxlen, go to the start of this block,
		 * and skip all those smaller than minlen.
		 */
		if (ltlen || args->alignment > 1) {
			cnt_cur->bc_ptrs[0] = 1;
			do {
				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
						&ltlen, &i)))
					goto error0;
				XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
				if (ltlen >= args->minlen)
					break;
				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
					goto error0;
			} while (i);
			ASSERT(ltlen >= args->minlen);
			if (!i)
				break;
		}
		i = cnt_cur->bc_ptrs[0];
		for (j = 1, blen = 0, bdiff = 0;
		     !error && j && (blen < args->maxlen || bdiff > 0);
		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
			/*
			 * For each entry, decide if it's better than
			 * the previous best entry.
			 */
			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena < args->minlen)
				continue;
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ASSERT(args->len >= args->minlen);
			if (args->len < blen)
				continue;
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbno, ltlen, &ltnew);
			if (ltnew != NULLAGBLOCK &&
			    (args->len > blen || ltdiff < bdiff)) {
				bdiff = ltdiff;
				bnew = ltnew;
				blen = args->len;
				besti = cnt_cur->bc_ptrs[0];
			}
		}
		/*
		 * It didn't work.  We COULD be in a case where
		 * there's a good record somewhere, so try again.
		 */
		if (blen == 0)
			break;
		/*
		 * Point at the best entry, and retrieve it again.
		 */
		cnt_cur->bc_ptrs[0] = besti;
		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
		args->len = blen;
		if (!xfs_alloc_fix_minleft(args)) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_near_nominleft(args);
			return 0;
		}
		blen = args->len;
		/*
		 * We are allocating starting at bnew for blen blocks.
		 */
		args->agbno = bnew;
		ASSERT(bnew >= ltbno);
		ASSERT(bnew + blen <= ltbno + ltlen);
		/*
		 * Set up a cursor for the by-bno tree.
		 */
		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
			args->agbp, args->agno, XFS_BTNUM_BNO);
		/*
		 * Fix up the btree entries.
		 */
		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
			goto error0;
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);

		trace_xfs_alloc_near_first(args);
		return 0;
	}
	/*
	 * Second algorithm.
	 * Search in the by-bno tree to the left and to the right
	 * simultaneously, until in each case we find a space big enough,
	 * or run into the edge of the tree.  When we run into the edge,
	 * we deallocate that cursor.
	 * If both searches succeed, we compare the two spaces and pick
	 * the better one.
	 * With alignment, it's possible for both to fail; the upper
	 * level algorithm that picks allocation groups for allocations
	 * is not supposed to do this.
	 */
	/*
	 * Allocate and initialize the cursor for the leftward search.
	 */
	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	/*
	 * Lookup <= bno to find the leftward search's starting point.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
		goto error0;
	if (!i) {
		/*
		 * Didn't find anything; use this cursor for the rightward
		 * search.
		 */
		bno_cur_gt = bno_cur_lt;
		bno_cur_lt = NULL;
	}
	/*
	 * Found something.  Duplicate the cursor for the rightward search.
	 */
	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
		goto error0;
	/*
	 * Increment the cursor, so we will point at the entry just right
	 * of the leftward entry if any, or to the leftmost entry.
	 */
	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
		goto error0;
	if (!i) {
		/*
		 * It failed, there are no rightward entries.
		 */
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
		bno_cur_gt = NULL;
	}
	/*
	 * Loop going left with the leftward cursor, right with the
	 * rightward cursor, until either both directions give up or
	 * we find an entry at least as big as minlen.
	 */
	do {
		if (bno_cur_lt) {
			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, ltbno, ltlen,
						  &ltbnoa, &ltlena);
			if (ltlena >= args->minlen)
				break;
			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_lt,
						     XFS_BTREE_NOERROR);
				bno_cur_lt = NULL;
			}
		}
		if (bno_cur_gt) {
			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			xfs_alloc_compute_aligned(args, gtbno, gtlen,
						  &gtbnoa, &gtlena);
			if (gtlena >= args->minlen)
				break;
			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
				goto error0;
			if (!i) {
				xfs_btree_del_cursor(bno_cur_gt,
						     XFS_BTREE_NOERROR);
				bno_cur_gt = NULL;
			}
		}
	} while (bno_cur_lt || bno_cur_gt);

	/*
	 * Got both cursors still active, need to find better entry.
	 */
	if (bno_cur_lt && bno_cur_gt) {
		if (ltlena >= args->minlen) {
			/*
			 * Left side is good, look for a right side entry.
			 */
			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
			xfs_alloc_fix_len(args);
			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, ltbno, ltlen, &ltnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_lt, &bno_cur_gt,
						ltdiff, &gtbno, &gtlen, &gtlena,
						0 /* search right */);
		} else {
			ASSERT(gtlena >= args->minlen);

			/*
			 * Right side is good, look for a left side entry.
			 */
			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
			xfs_alloc_fix_len(args);
			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
				args->alignment, gtbno, gtlen, &gtnew);

			error = xfs_alloc_find_best_extent(args,
						&bno_cur_gt, &bno_cur_lt,
						gtdiff, &ltbno, &ltlen, &ltlena,
						1 /* search left */);
		}

		if (error)
			goto error0;
	}

	/*
	 * If we couldn't get anything, give up.
	 */
	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}

	/*
	 * At this point we have selected a freespace entry, either to the
	 * left or to the right.  If it's on the right, copy all the
	 * useful variables to the "left" set so we only have one
	 * copy of this code.
	 */
	if (bno_cur_gt) {
		bno_cur_lt = bno_cur_gt;
		bno_cur_gt = NULL;
		ltbno = gtbno;
		ltbnoa = gtbnoa;
		ltlen = gtlen;
		ltlena = gtlena;
		j = 1;
	} else
		j = 0;

	/*
	 * Fix up the length and compute the useful address.
	 */
	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args)) {
		trace_xfs_alloc_near_nominleft(args);
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		return 0;
	}
	rlen = args->len;
	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
		ltlen, &ltnew);
	ASSERT(ltnew >= ltbno);
	ASSERT(ltnew + rlen <= ltbno + ltlen);
	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	args->agbno = ltnew;

	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
		goto error0;

	if (j)
		trace_xfs_alloc_near_greater(args);
	else
		trace_xfs_alloc_near_lesser(args);

	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
	return 0;

 error0:
	trace_xfs_alloc_near_error(args);
	if (cnt_cur != NULL)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur_lt != NULL)
		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
	if (bno_cur_gt != NULL)
		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
	return error;
}

/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int				/* error */
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
	int		error;		/* error result */
	xfs_agblock_t	fbno;		/* start of found freespace */
	xfs_extlen_t	flen;		/* length of found freespace */
	int		i;		/* temp status variable */
	xfs_agblock_t	rbno;		/* returned block number */
	xfs_extlen_t	rlen;		/* length of returned extent */

	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	bno_cur = NULL;
	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;
	/*
	 * If none, then pick up the last entry in the tree unless the
	 * tree is empty.
	 */
	if (!i) {
		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno,
				&flen, &i)))
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
	}
	/*
	 * There's a freespace as big as maxlen+alignment-1, get it.
	 */
	else {
		if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;
		xfs_extlen_t	bestflen;
		xfs_agblock_t	bestrbno;
		xfs_extlen_t	bestrlen;

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if (flen < bestrlen)
				break;
			xfs_alloc_compute_aligned(args, fbno, flen,
						  &rbno, &rlen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
				(rlen <= flen && rbno + rlen <= fbno + flen),
				error0);
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	xfs_alloc_fix_len(args);
	if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
		trace_xfs_alloc_size_nominleft(args);
		args->agbno = NULLAGBLOCK;
		return 0;
	}
	rlen = args->len;
	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
	/*
	 * Allocate and initialize a cursor for the by-block tree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	XFS_WANT_CORRUPTED_GOTO(
		args->agbno + args->len <=
			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
		error0);
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Deal with the case where only small freespaces remain.
 * Either return the contents of the last freespace record,
 * or allocate space from the freelist if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	xfs_btree_cur_t	*ccur,	/* by-size cursor */
	xfs_agblock_t	*fbnop,	/* result block number */
	xfs_extlen_t	*flenp,	/* result length */
	int		*stat)	/* status: 0-freelist, 1-normal/none */
{
	int		error;
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		i;

	if ((error = xfs_btree_decrement(ccur, 0, &i)))
		goto error0;
	if (i) {
		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	/*
	 * Nothing in the btree, try the freelist.  Make sure
	 * to respect minleft even when pulling from the
	 * freelist.
	 */
	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
		  > args->minleft)) {
		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
		if (error)
			goto error0;
		if (fbno != NULLAGBLOCK) {
			if (args->userdata) {
				xfs_buf_t	*bp;

				bp = xfs_btree_get_bufs(args->mp, args->tp,
					args->agno, fbno, 0);
				xfs_trans_binval(args->tp, bp);
			}
			args->len = 1;
			args->agbno = fbno;
			XFS_WANT_CORRUPTED_GOTO(
				args->agbno + args->len <=
				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
				error0);
			args->wasfromfl = 1;
			trace_xfs_alloc_small_freelist(args);
			*stat = 0;
			return 0;
		}
		/*
		 * Nothing in the freelist.
		 */
		else
			flen = 0;
	}
	/*
	 * Can't allocate from the freelist for some reason.
	 */
	else {
		fbno = NULLAGBLOCK;
		flen = 0;
	}
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error0:
	trace_xfs_alloc_small_error(args);
	return error;
}

/*
 * Free the extent starting at agno/bno for length.
 */
STATIC int			/* error */
xfs_free_ag_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer for a.g. freelist header */
	xfs_agnumber_t	agno,	/* allocation group number */
	xfs_agblock_t	bno,	/* starting block number */
	xfs_extlen_t	len,	/* length of extent */
	int		isfl)	/* set if is freelist blocks - no sb acctg */
{
	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
	int		error;		/* error return value */
	xfs_agblock_t	gtbno;		/* start of right neighbor block */
	xfs_extlen_t	gtlen;		/* length of right neighbor block */
	int		haveleft;	/* have a left neighbor block */
	int		haveright;	/* have a right neighbor block */
	int		i;		/* temp, result code */
	xfs_agblock_t	ltbno;		/* start of left neighbor block */
	xfs_extlen_t	ltlen;		/* length of left neighbor block */
	xfs_mount_t	*mp;		/* mount point struct for filesystem */
	xfs_agblock_t	nbno;		/* new starting block of freespace */
	xfs_extlen_t	nlen;		/* new length of freespace */
	xfs_perag_t	*pag;		/* per allocation group data */

	mp = tp->t_mountp;
	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	cnt_cur = NULL;
	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 * Very bad.
			 */
			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(
				i == 1 && xxbno == ltbno && xxlen == ltlen,
				error0);
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size tree.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the ag and superblock.
	 */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_perag_put(pag);
	if (error)
		goto error0;

	if (!isfl)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
	XFS_STATS_INC(xs_freex);
	XFS_STATS_ADD(xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);

	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk.  We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a per-ag
	 * basis and each transaction records which entries should be removed
	 * when the iclog commits to disk.  If a busy block is allocated,
	 * the iclog is pushed up to the LSN that freed the block.
	 */
	xfs_alloc_busy_insert(tp, agno, bno, len);
	return 0;

 error0:
	trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_ag_maxlevels = level;
}
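
/*
 * Illustrative sizing for xfs_alloc_compute_maxlevels(): for an AG of
 * 2^20 blocks with minleafrecs = 255 and minnoderecs = 170 (roughly
 * the values for 4k filesystem blocks), maxleafents is about 2^19,
 * giving 2057 leaf blocks, then 13 node blocks, then 1, so
 * m_ag_maxlevels ends up as 3.
 */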

/*
 * Find the length of the longest extent in an AG.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	xfs_extlen_t		need, delta = 0;

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
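
/*
 * Note the fallback in xfs_alloc_longest_free_extent(): if the
 * freelist deficit swallows the longest extent, the function still
 * returns 1 whenever any free space exists (pagf_flcount or
 * pagf_longest non-zero), i.e. "at least one block", instead of
 * making the AG look completely full.
 */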

/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size.
 */
STATIC int			/* error */
xfs_alloc_fix_freelist(
	xfs_alloc_arg_t	*args,	/* allocation argument structure */
	int		flags)	/* XFS_ALLOC_FLAG_... */
{
	xfs_buf_t	*agbp;	/* agf buffer pointer */
	xfs_agf_t	*agf;	/* a.g. freespace structure pointer */
	xfs_buf_t	*agflbp;/* agfl buffer pointer */
	xfs_agblock_t	bno;	/* freelist block */
	xfs_extlen_t	delta;	/* new blocks needed in freelist */
	int		error;	/* error result code */
	xfs_extlen_t	longest;/* longest extent in allocation group */
	xfs_mount_t	*mp;	/* file system mount point structure */
	xfs_extlen_t	need;	/* total blocks needed in freelist */
	xfs_perag_t	*pag;	/* per-ag information structure */
	xfs_alloc_arg_t	targs;	/* local allocation arguments */
	xfs_trans_t	*tp;	/* transaction pointer */

	mp = args->mp;

	pag = args->pag;
	tp = args->tp;
	if (!pag->pagf_init) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (!pag->pagf_init) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	} else
		agbp = NULL;

	/*
	 * If this is a metadata preferred pag and we are user data
	 * then try somewhere else if we are not being asked to
	 * try harder at this point
	 */
	if (pag->pagf_metadata && args->userdata &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		args->agbp = NULL;
		return 0;
	}

	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		/*
		 * If it looks like there isn't a long enough extent, or enough
		 * total blocks, reject it.
		 */
		need = XFS_MIN_FREELIST_PAG(pag, mp);
		longest = xfs_alloc_longest_free_extent(mp, pag);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(pag->pagf_freeblks + pag->pagf_flcount -
			   need - args->total) < (int)args->minleft)) {
			if (agbp)
				xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}

	/*
	 * Get the a.g. freespace buffer.
	 * Can fail if we're not blocking on locks, and it's held.
	 */
	if (agbp == NULL) {
		if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
				&agbp)))
			return error;
		if (agbp == NULL) {
			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Figure out how many blocks we should have in the freelist.
	 */
	agf = XFS_BUF_TO_AGF(agbp);
	need = XFS_MIN_FREELIST(agf, mp);
	/*
	 * If there isn't enough total or single-extent, reject it.
	 */
	if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
		delta = need > be32_to_cpu(agf->agf_flcount) ?
			(need - be32_to_cpu(agf->agf_flcount)) : 0;
		longest = be32_to_cpu(agf->agf_longest);
		longest = (longest > delta) ? (longest - delta) :
			(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
		if ((args->minlen + args->alignment + args->minalignslop - 1) >
				longest ||
		    ((int)(be32_to_cpu(agf->agf_freeblks) +
		     be32_to_cpu(agf->agf_flcount) - need - args->total) <
		     (int)args->minleft)) {
			xfs_trans_brelse(tp, agbp);
			args->agbp = NULL;
			return 0;
		}
	}
	/*
	 * Make the freelist shorter if it's too long.
	 */
	while (be32_to_cpu(agf->agf_flcount) > need) {
		xfs_buf_t	*bp;

		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
		if (error)
			return error;
		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
			return error;
		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
		xfs_trans_binval(tp, bp);
	}
	/*
	 * Initialize the args structure.
	 */
	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
		targs.minalignslop = 0;
	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
		return error;
	/*
	 * Make the freelist longer if it's too short.
	 */
	while (be32_to_cpu(agf->agf_flcount) < need) {
		targs.agbno = 0;
		targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
		/*
		 * Allocate as many blocks as possible at once.
		 */
		if ((error = xfs_alloc_ag_vextent(&targs))) {
			xfs_trans_brelse(tp, agflbp);
			return error;
		}
		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			xfs_trans_brelse(tp, agflbp);
			args->agbp = NULL;
			return 0;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(tp, agbp,
							agflbp, bno, 0);
			if (error)
				return error;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;
}

/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.
 */
int				/* error */
xfs_alloc_get_freelist(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
	int		btreeblk) /* destination is a AGF btree */
{
	xfs_agf_t	*agf;	/* a.g. freespace structure */
	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
	xfs_agblock_t	bno;	/* block number returned */
	int		error;
	int		logflags;
	xfs_mount_t	*mp;	/* mount structure */
	xfs_perag_t	*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	/*
	 * Freelist is empty, give up.
	 */
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	mp = tp->t_mountp;
	if ((error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	/*
	 * Get the block number and update the data structures.
	 */
	bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
	be32_add_cpu(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);
	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
		agf->agf_flfirst = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, -1);
	xfs_trans_agflist_delta(tp, -1);
	pag->pagf_flcount--;
	xfs_perag_put(pag);

	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, 1);
		pag->pagf_btreeblks++;
		logflags |= XFS_AGF_BTREEBLKS;
	}

	xfs_alloc_log_agf(tp, agbp, logflags);
	*bnop = bno;

	/*
	 * As blocks are freed, they are added to the per-ag busy list and
	 * remain there until the freeing transaction is committed to disk.
	 * Now that we have allocated blocks, this list must be searched to see
	 * if a block is being reused.  If one is, then the freeing transaction
	 * must be pushed to disk before this transaction.
	 *
	 * We do this by setting the current transaction to a sync transaction
	 * which guarantees that the freeing transaction is on disk before this
	 * transaction. This is done instead of a synchronous log force here so
	 * that we don't sit and wait with the AGF locked in the transaction
	 * during the log force.
	 */
	if (xfs_alloc_busy_search(mp, be32_to_cpu(agf->agf_seqno), bno, 1))
		xfs_trans_set_sync(tp);
	return 0;
}
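
/*
 * The AGFL itself is managed as a circular array: agf_flfirst indexes
 * the head (consumed by xfs_alloc_get_freelist above), agf_fllast the
 * tail (filled by xfs_alloc_put_freelist below), both wrapping to 0 at
 * XFS_AGFL_SIZE(mp), with agf_flcount tracking the live entries.
 */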

/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
{
	int	first;		/* first byte offset */
	int	last;		/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}

/*
 * Interface for inode allocation to force the pag data to be initialized.
 */
int					/* error */
xfs_alloc_pagf_init(
	xfs_mount_t		*mp,	/* file system mount structure */
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags)	/* XFS_ALLOC_FLAGS_... */
{
	xfs_buf_t		*bp;
	int			error;

	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}

/*
 * Put the block on the freelist for the allocation group.
 */
int					/* error */
xfs_alloc_put_freelist(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
	xfs_agblock_t		bno,	/* block being freed */
	int			btreeblk) /* block came from a AGF btree */
{
	xfs_agf_t		*agf;	/* a.g. freespace structure */
	xfs_agfl_t		*agfl;	/* a.g. free block array */
	__be32			*blockp;/* pointer to array entry */
	int			error;
	int			logflags;
	xfs_mount_t		*mp;	/* mount structure */
	xfs_perag_t		*pag;	/* per allocation group data */

	agf = XFS_BUF_TO_AGF(agbp);
	mp = tp->t_mountp;

	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
			be32_to_cpu(agf->agf_seqno), &agflbp)))
		return error;
	agfl = XFS_BUF_TO_AGFL(agflbp);
	be32_add_cpu(&agf->agf_fllast, 1);
	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
		agf->agf_fllast = 0;

	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
	be32_add_cpu(&agf->agf_flcount, 1);
	xfs_trans_agflist_delta(tp, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}
	xfs_perag_put(pag);

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	xfs_alloc_log_agf(tp, agbp, logflags);
	xfs_trans_log_buf(tp, agflbp,
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
			sizeof(xfs_agblock_t) - 1));
	return 0;
}

/*
 * Read in the allocation group header (free/alloc section).
 */
int					/* error */
xfs_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_BUF_ */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf	*agf;		/* ag freelist header */
	int		agf_ok;		/* set if agf is consistent */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), flags, bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;

	ASSERT(!XFS_BUF_GETERROR(*bpp));
	agf = XFS_BUF_TO_AGF(*bpp);

	/*
	 * Validate the magic number of the agf block.
	 */
	agf_ok =
		be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
		XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
		be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
		be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
		be32_to_cpu(agf->agf_seqno) == agno;
	if (xfs_sb_version_haslazysbcount(&mp->m_sb))
		agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
						be32_to_cpu(agf->agf_length);
	if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
			XFS_RANDOM_ALLOC_READ_AGF))) {
		XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
				     XFS_ERRLEVEL_LOW, mp, agf);
		xfs_trans_brelse(tp, *bpp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
	return 0;
}

/*
 * Read in the allocation group header (free/alloc section).
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf		*agf;	/* ag freelist header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;
	ASSERT(!XFS_BUF_GETERROR(*bpp));

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
	}
#ifdef DEBUG
	else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	xfs_perag_put(pag);
	return 0;
}
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;
	int		no_min = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}
	minleft = args->minleft;

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno. Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}
				if (flags == 0) {
					no_min = 1;
				} else {
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}

/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int				/* error */
xfs_free_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_fsblock_t	bno,	/* starting block number of extent */
	xfs_extlen_t	len)	/* length of extent */
{
	xfs_alloc_arg_t	args;
	int		error;

	ASSERT(len != 0);
	memset(&args, 0, sizeof(xfs_alloc_arg_t));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
	ASSERT(args.agno < args.mp->m_sb.sb_agcount);
	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
	args.pag = xfs_perag_get(args.mp, args.agno);
	if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
		goto error0;
#ifdef DEBUG
	ASSERT(args.agbp != NULL);
	ASSERT((args.agbno + len) <=
		be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
#endif
	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
	xfs_perag_put(args.pag);
	return error;
}

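/*
 * A usage sketch (not part of the original source): freeing an extent from
 * an active transaction "tp". The freed blocks become busy extents and must
 * not be reused until this transaction reaches the log; "out_cancel" is a
 * hypothetical error label.
 */
#if 0
	error = xfs_free_extent(tp, fsbno, len);
	if (error)
		goto out_cancel;
#endif
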
/*
 * AG Busy list management
 * The busy list contains block ranges that have been freed but whose
 * transactions have not yet hit disk. If any block listed in a busy
 * list is reused, the transaction that freed it must be forced to disk
 * before continuing to use the block.
 *
 * xfs_alloc_busy_insert - add to the per-ag busy list
 * xfs_alloc_busy_clear - remove an item from the per-ag busy list
 * xfs_alloc_busy_search - search for a busy extent
 */

/*
 * Insert a new extent into the busy tree.
 *
 * The busy extent tree is indexed by the start block of the busy extent.
 * There can be multiple overlapping ranges in the busy extent tree but only
 * ever one entry at a given start block. The reason for this is that
 * multi-block extents can be freed, then smaller chunks of that extent
 * allocated and freed again before the first transaction commit is on disk.
 * If the exact same start block is freed a second time, we have to wait for
 * that busy extent to pass out of the tree before the new extent is inserted.
 * There are two main cases we have to handle here.
 *
 * The first case is a transaction that triggers a "free - allocate - free"
 * cycle. This can occur during btree manipulations as a btree block is freed
 * to the freelist, then allocated from the free list, then freed again. In
 * this case, the second extent free is what triggers the duplicate and as
 * such the transaction IDs should match. Because the extent was allocated in
 * this transaction, the transaction must be marked as synchronous. This is
 * true for all cases where the free/alloc/free occurs in the one transaction,
 * hence the addition of the ASSERT(tp->t_flags & XFS_TRANS_SYNC) to this case.
 * This serves to catch violations of the second case quite effectively.
 *
 * The second case is where the free/alloc/free occur in different
 * transactions. In this case, the thread freeing the extent the second time
 * can't mark the extent busy immediately because it is already tracked in a
 * transaction that may be committing. When the log commit for the existing
 * busy extent completes, the busy extent will be removed from the tree. If we
 * allow the second busy insert to continue using that busy extent structure,
 * it can be freed before this transaction is safely in the log. Hence our
 * only option in this case is to force the log to remove the existing busy
 * extent from the list before we insert the new one with the current
 * transaction ID.
 *
 * The problem we are trying to avoid in the free-alloc-free in separate
 * transactions is most easily described with a timeline:
 *
 *	Thread 1	Thread 2	Thread 3	xfslogd
 *
 *	[timeline elided in this copy: threads 1 and 2 free the extent in
 *	 separate transactions; thread 3 then matches the stale busy entry
 *	 and would reuse the blocks at "KABOOM" just as...]
 *
 *							checkpoint completes
 *
 * By issuing a log force in thread 3 @ "KABOOM", the thread will block until
 * the checkpoint completes, and the busy extent it matched will have been
 * removed from the tree when it is woken. Hence it can then continue safely.
 *
 * However, to ensure this matching process is robust, we need to use the
 * transaction ID for identifying transactions, as delayed logging results in
 * the busy extent and transaction lifecycles being different. i.e. the busy
 * extent is active for a lot longer than the transaction. Hence the
 * transaction structure can be freed and reallocated, and the same extent
 * then marked busy again in the new transaction. In this case the new
 * transaction will have a different tid but can have the same address, and
 * hence we need to check against the tid.
 *
 * Future: for delayed logging, we could avoid the log force if the extent was
 * first freed in the current checkpoint sequence. This, however, requires the
 * ability to pin the current checkpoint in memory until this transaction
 * commits to ensure that both the original free and the current one combine
 * logically into the one checkpoint. If the checkpoint sequences are
 * different, however, we still need to wait on a log force.
 */

void
xfs_alloc_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_busy_extent	*new;
	struct xfs_busy_extent	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	int			match;

	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
	if (!new) {
		/*
		 * No Memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to ensure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_alloc_busy(tp, agno, bno, len, 1);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	new->tid = xfs_log_get_trans_ident(tp);

	INIT_LIST_HEAD(&new->list);

	/* trace before insert to be able to see failed inserts */
	trace_xfs_alloc_busy(tp, agno, bno, len, 0);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
restart:
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	parent = NULL;
	busyp = NULL;
	match = 0;
	while (*rbp && match >= 0) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);

		if (new->bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			rbp = &(*rbp)->rb_left;
			if (new->bno + new->length > busyp->bno)
				match = busyp->tid == new->tid ? 1 : -1;
		} else if (new->bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			rbp = &(*rbp)->rb_right;
			if (bno < busyp->bno + busyp->length)
				match = busyp->tid == new->tid ? 1 : -1;
		} else {
			match = busyp->tid == new->tid ? 1 : -1;
			break;
		}
	}
	if (match < 0) {
		/* overlap marked busy in different transaction */
		spin_unlock(&pag->pagb_lock);
		xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
		goto restart;
	}
	if (match > 0) {
		/*
		 * overlap marked busy in same transaction. Update if exact
		 * start block match, otherwise combine the busy extents into
		 * a single range.
		 */
		if (busyp->bno == new->bno) {
			busyp->length = max(busyp->length, new->length);
			spin_unlock(&pag->pagb_lock);
			ASSERT(tp->t_flags & XFS_TRANS_SYNC);
			xfs_perag_put(pag);
			kmem_free(new);
			return;
		}
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		new->length = max(busyp->bno + busyp->length,
					new->bno + new->length) -
				min(busyp->bno, new->bno);
		new->bno = min(busyp->bno, new->bno);
	} else
		busyp = NULL;

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	kmem_free(busyp);
}

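/*
 * The tree walk above classifies overlap with three comparisons on the start
 * blocks. As a sketch (hypothetical helper, not used by this file), the
 * underlying test reduces to: two half-open ranges [bno1, bno1 + len1) and
 * [bno2, bno2 + len2) overlap iff each one starts before the other ends.
 */
#if 0
static inline int
xfs_busy_ranges_overlap(
	xfs_agblock_t	bno1,
	xfs_extlen_t	len1,
	xfs_agblock_t	bno2,
	xfs_extlen_t	len2)
{
	return bno1 < bno2 + len2 && bno2 < bno1 + len1;
}
#endif
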
/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate. The busy extent tree lock is taken and released internally.
 * This function returns 0 for no overlapping busy extent, -1 for an
 * overlapping but not exact busy extent, and 1 for an exact match. This is
 * done so that a non-zero return indicates an overlap that will require a
 * synchronous transaction, while still distinguishing a partial match from
 * an exact one.
 */
int
xfs_alloc_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_busy_extent	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	trace_xfs_alloc_busysearch(mp, agno, bno, len, !!match);
	xfs_perag_put(pag);
	return match;
}

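/*
 * A caller-side sketch (not part of the original source), modelled on the
 * check in xfs_alloc_ag_vextent(): any non-zero return means the candidate
 * range overlaps a freed-but-uncommitted extent, so the reusing transaction
 * is made synchronous to push the free to disk first.
 */
#if 0
	if (xfs_alloc_busy_search(args->mp, args->agno, args->agbno, args->len))
		xfs_trans_set_sync(args->tp);
#endif
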
void
xfs_alloc_busy_clear(
	struct xfs_mount	*mp,
	struct xfs_busy_extent	*busyp)
{
	struct xfs_perag	*pag;

	trace_xfs_alloc_unbusy(mp, busyp->agno, busyp->bno,
						busyp->length);

	ASSERT(xfs_alloc_busy_search(mp, busyp->agno, busyp->bno,
						busyp->length) == 1);

	list_del_init(&busyp->list);

	pag = xfs_perag_get(mp, busyp->agno);
	spin_lock(&pag->pagb_lock);
	rb_erase(&busyp->rb_node, &pag->pagb_tree);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);

	kmem_free(busyp);
}

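/*
 * Lifecycle sketch (editorial illustration with hypothetical context): how
 * the three busy-list calls fit together over the life of a freed extent.
 * Exactly where xfs_alloc_busy_clear() runs depends on the log completion
 * path that processes the transaction's t_busy list.
 */
#if 0
	/* 1. freeing: queue the range as busy under transaction tp */
	xfs_alloc_busy_insert(tp, agno, bno, len);

	/* 2. reallocation attempt: overlap forces a synchronous commit */
	if (xfs_alloc_busy_search(mp, agno, bno, len))
		xfs_trans_set_sync(tp);

	/* 3. once the freeing transaction is on disk, drop the entry */
	xfs_alloc_busy_clear(mp, busyp);
#endif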