/*	$NetBSD: ffs_alloc.c,v 1.16 2005/08/19 02:09:50 christos Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */
#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.16 2005/08/19 02:09:50 christos Exp $");
#endif

#include <sys/param.h>

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"
static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
            daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

extern const int inside[], around[];
extern const u_char * const fragtbl[];
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified.  If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
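/*
 * Illustrative sketch (example figures, not from the original source): with
 * fs_fsize = 2048 and fs_bsize = 16384, the "multiple of fs_fsize and
 * <= fs_bsize" rule above means the legal request sizes are 2048, 4096,
 * ..., 16384; anything else trips the size/fragoff() sanity check at the
 * top of ffs_alloc() below.
 */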
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
        struct fs *fs = ip->i_fs;

        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
                errx(1, "ffs_alloc: bad size: bsize %d size %d",
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
        if (bpref >= fs->fs_size)
                cg = ino_to_cg(fs, ip->i_number);
                cg = dtog(fs, bpref);
        bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
        DIP_ADD(ip, blocks, size / DEV_BSIZE);
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.  The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
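/*
 * Illustrative sketch (example figures, not from this file): with
 * fs_fpg = 11760 fragments per cylinder group and fs_frag = 8 fragments
 * per block, the "start of a cylinder group" preference computed below as
 * fs_fpg * cg + fs_frag is 3 * 11760 + 8 = 35288 for cylinder group 3,
 * i.e. one full block past the start of that group.  In the contiguous
 * case the preference is simply the previous block of the file plus
 * fs_frag, the next physically adjacent block.
 */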
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
        int avgbfree, startcg;

        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                            ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                for (cg = 0; cg <= startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
ffs_blkpref_ufs2(ip, lbn, indx, bap)
        int avgbfree, startcg;

        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                            ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                for (cg = 0; cg < startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size': size for data blocks, mode for inodes
 */
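/*
 * Illustrative sketch of the probe order (assuming the rehash loop below
 * advances cg by i on each pass, as in the classic FFS scheme; example
 * figures, not from this file): with fs_ncg = 16 and a preferred group
 * icg = 3, the quadratic rehash visits groups 4, 6, 10, 2 (mod 16) after
 * the preferred group itself, and the brute-force pass then starts at
 * (icg + 2) % 16 = 5.
 */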
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
        /*
         * 1: preferred cylinder group
         */
        result = (*allocator)(ip, cg, pref, size);
        /*
         * 2: quadratic rehash
         */
        for (i = 1; i < fs->fs_ncg; i *= 2) {
                if (cg >= fs->fs_ncg)
                result = (*allocator)(ip, cg, 0, size);
        /*
         * 3: brute force search
         * Note that we start at i == 2, since 0 was checked initially,
         * and 1 is always checked in the quadratic rehash.
         */
        cg = (icg + 2) % fs->fs_ncg;
        for (i = 2; i < fs->fs_ncg; i++) {
                result = (*allocator)(ip, cg, 0, size);
        if (cg == fs->fs_ncg)
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
        int error, frags, allocsiz, i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, &bp);
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap) ||
            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
        if (size == fs->fs_bsize) {
                bno = ffs_alloccgblk(ip, bp, bpref);
        /*
         * check to see if any fragments are already available
         * allocsiz is the size which will be allocated, hacking
         * it down to a smaller size if necessary
         */
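        /*
         * Illustrative sketch (hypothetical numbers): for a 3-fragment
         * request on an 8-fragment-per-block file system, the loop below
         * inspects cg_frsum[3], cg_frsum[4], ..., cg_frsum[7] and stops at
         * the first nonzero slot, so an existing 5-fragment hole can satisfy
         * a 3-fragment request; only if every slot is empty is a whole block
         * broken up.
         */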
        frags = numfrags(fs, size);
        for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
                if (cgp->cg_frsum[allocsiz] != 0)
        if (allocsiz == fs->fs_frag) {
                /*
                 * no fragments were available, so a block will be
                 * allocated, and hacked up
                 */
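                /*
                 * Illustrative sketch (hypothetical numbers): with
                 * fs_frag = 8 and a 3-fragment request, the code below
                 * allocates a full block, marks the trailing 8 - 3 = 5
                 * fragments free again, credits 5 to the nffree counters,
                 * and bumps cg_frsum[5] to record the newly created
                 * 5-fragment piece.
                 */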
                if (cgp->cg_cs.cs_nbfree == 0) {
                bno = ffs_alloccgblk(ip, bp, bpref);
                bpref = dtogd(fs, bno);
                for (i = frags; i < fs->fs_frag; i++)
                        setbit(cg_blksfree(cgp, needswap), bpref + i);
                i = fs->fs_frag - frags;
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                ufs_add32(cgp->cg_frsum[i], 1, needswap);
        bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
        for (i = 0; i < frags; i++)
                clrbit(cg_blksfree(cgp, needswap), bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
        fs->fs_cstotal.cs_nffree -= frags;
        fs->fs_cs(fs, cg).cs_nffree -= frags;
        ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
        if (frags != allocsiz)
                ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
        blkno = cg * fs->fs_fpg + bno;
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
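/*
 * Illustrative note (sketch, not from the original source): when the
 * caller's preference bpref is zero or lies in a different cylinder group
 * than the one described by bp, the function below falls back to cg_rotor,
 * the per-group "next place to look" hint, and a successful allocation
 * updates that rotor so the next search starts where this one left off.
 */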
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        cgp = (struct cg *)bp->b_data;
        blksfree = cg_blksfree(cgp, needswap);
        if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
        bpref = blknum(fs, bpref);
        bno = dtogd(fs, bpref);
        /*
         * if the requested block is available, use it
         */
        if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
        /*
         * Take the next available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, blksfree, (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
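/*
 * Illustrative sketch (hypothetical numbers): the sanity check below
 * rejects a free that would straddle a block boundary; with fs_frag = 8,
 * freeing 4 fragments starting at fragment offset 6 within a block gives
 * fragnum = 6 and numfrags = 4, and 6 + 4 > 8, so the request is fatal.
 */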
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
        int32_t fragno, cgbno;
        int i, error, cg, blk, frags, bbase;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
                    (long long)bno, fs->fs_bsize, size);
        if (bno >= fs->fs_size) {
                warnx("bad block %lld, ino %llu", (long long)bno,
                    (unsigned long long)ip->i_number);
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
            (int)fs->fs_cgsize, &bp);
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap)) {
        cgbno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                fragno = fragstoblks(fs, cgbno);
                if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
                        errx(1, "blkfree: freeing free block %lld",
                ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
                ffs_clusteracct(fs, cgp, fragno, 1);
                ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
                bbase = cgbno - fragnum(fs, cgbno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
                                errx(1, "blkfree: freeing free frag: block %lld",
                                    (long long)(cgbno + i));
                        setbit(cg_blksfree(cgp, needswap), cgbno + i);
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
                /*
                 * if a complete block has been reassembled, account for it
                 */
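                /*
                 * Illustrative sketch (hypothetical numbers): with
                 * fs_frag = 8, if this free supplies the last missing
                 * fragments of a block, the code below converts 8 fragments
                 * of nffree credit into a single nbfree block and tells the
                 * cluster map about the newly free block.
                 */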
                fragno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
                        ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, fragno, 1);
                        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}
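/*
 * Note (sketch of the contract as used below, not an authoritative spec):
 * scanc() returns how many bytes of the map were left unscanned; a return
 * of 0 means no byte matched, and ffs_mapsearch() turns a nonzero result
 * back into a byte index with (start + len - loc).
 */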
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
        int start, len, loc, i;
        int blk, field, subfield, pos;
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
                start = dtogd(fs, bpref) / NBBY;
                start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
        len = howmany(fs->fs_fpg, NBBY) - start;
        loc = scanc((u_int)len,
            (const u_char *)&cg_blksfree(cgp, needswap)[start],
            (const u_char *)fragtbl[fs->fs_frag],
            (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                loc = scanc((u_int)len,
                    (const u_char *)&cg_blksfree(cgp, needswap)[0],
                    (const u_char *)fragtbl[fs->fs_frag],
                    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                            "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
                            ufs_rw32(cgp->cg_freeoff, needswap),
                            (long)cg_blksfree(cgp, needswap) - (long)cgp);
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = ufs_rw32(bno, needswap);
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
        errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
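/*
 * Illustrative sketch (hypothetical numbers, assuming the combined length
 * back + forw + 1 is what gets credited, as in the usual FFS accounting):
 * with fs_contigsumsize = 4, freeing a block that joins a 2-block free run
 * in front of it and a 1-block free run behind it forms a 4-block cluster,
 * so the code below would add cnt to sump[4] and subtract cnt from sump[2]
 * and sump[1], since those smaller clusters no longer exist on their own.
 */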
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
        u_char *freemapp, *mapp;
        int i, start, end, forw, back, map, bit;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_contigsumsize <= 0)
        freemapp = cg_clustersfree(cgp, needswap);
        sump = cg_clustersum(cgp, needswap);
        /*
         * Allocate or clear the actual block.
         */
                setbit(freemapp, blkno);
                clrbit(freemapp, blkno);
        /*
         * Find the size of the cluster going forward.
         */
        end = start + fs->fs_contigsumsize;
        if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
                end = ufs_rw32(cgp->cg_nclusterblks, needswap);
        mapp = &freemapp[start / NBBY];
        bit = 1 << (start % NBBY);
        for (i = start; i < end; i++) {
                if ((map & bit) == 0)
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
        /*
         * Find the size of the cluster going backward.
         */
        end = start - fs->fs_contigsumsize;
        mapp = &freemapp[start / NBBY];
        bit = 1 << (start % NBBY);
        for (i = start; i > end; i--) {
                if ((map & bit) == 0)
                if ((i & (NBBY - 1)) != 0) {
                        bit = 1 << (NBBY - 1);
        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        if (i > fs->fs_contigsumsize)
                i = fs->fs_contigsumsize;
        ufs_add32(sump[i], cnt, needswap);
                ufs_add32(sump[back], -cnt, needswap);
                ufs_add32(sump[forw], -cnt, needswap);
        /*
         * Update cluster summary information.
         */
        lp = &sump[fs->fs_contigsumsize];
        for (i = fs->fs_contigsumsize; i > 0; i--)
                if (ufs_rw32(*lp--, needswap) > 0)
        fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;