/*	$NetBSD: ffs_alloc.c,v 1.16 2005/08/19 02:09:50 christos Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.16 2005/08/19 02:09:50 christos Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
		     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		DIP_ADD(ip, blocks, size / DEV_BSIZE);
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
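
/*
 * Usage sketch (illustrative only, not a caller from this file): to
 * obtain one full block for logical block `lbn' of `ip', letting the
 * allocator pick the cylinder group from the inode:
 *
 *	daddr_t bno;
 *	int error;
 *
 *	error = ffs_alloc(ip, lbn, 0, fs->fs_bsize, &bno);
 *	if (error == ENOSPC)
 *		errx(1, "out of space");
 *
 * A `size' that is not a multiple of fs_fsize, or larger than
 * fs_bsize, terminates the program via the errx() check above.
 */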

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
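
/*
 * Worked example (hypothetical numbers, not from the original source):
 * with fs_fpg = 32768 fragments per cylinder group and fs_frag = 8
 * fragments per block, a file whose inode sits in cylinder group 3
 * gets the preference 32768 * 3 + 8 = 98312, i.e. the first data
 * block past the metadata at the front of that group.
 */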

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
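
/*
 * Illustration (added, not part of the original source): starting from
 * group icg, the quadratic rehash probes groups icg+1, icg+3, icg+7,
 * icg+15, ... (mod fs_ncg), because the step `i' doubles on each pass.
 * Group icg itself was probed first, so the brute-force sweep can
 * safely begin at icg+2, the first group the earlier phases may have
 * skipped.
 */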

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}
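
/*
 * Worked example (hypothetical, fs_frag = 8): a request for 3 fragments
 * satisfied from a free 5-fragment run decrements cg_frsum[5] and
 * increments cg_frsum[5 - 3] = cg_frsum[2] for the leftover run.  When
 * no run of at least 3 free fragments exists, a whole block is broken
 * up instead and the unused 8 - 3 = 5 fragments are returned to the map.
 */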

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
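
/*
 * Note (added for clarity): cg_rotor, cg_cgx and the other cylinder
 * group fields are kept in the byte order of the target file system,
 * so every access goes through ufs_rw32()/ufs_add32(), with `needswap'
 * indicating whether host and target byte order differ.
 */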

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
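
/*
 * Example (hypothetical, fs_frag = 8): freeing a 3-fragment piece whose
 * other 5 neighbours within the same block are already free reassembles
 * a complete block; all 8 fragments are then removed from the fragment
 * counts (cs_nffree) and re-credited as one full block in cs_nbfree.
 */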

static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
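
/*
 * Note (added for clarity): scanc() mirrors the old VAX SCANC
 * instruction: it returns how many bytes of `cp' remain once a byte
 * whose table entry matches `mask' is found, or 0 if the whole range
 * was scanned without a match.  ffs_mapsearch() below relies on the
 * 0-means-not-found convention.
 */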

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
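
/*
 * Note (added for clarity): fragtbl[fs->fs_frag] classifies every
 * possible free-map byte by the sizes of the free-fragment runs it
 * contains, which lets scanc() reject whole bytes that cannot hold
 * `allocsiz' fragments; the `around' and `inside' masks from
 * ffs_tables.c then locate the exact run within the candidate block.
 */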

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp, needswap);
	sump = cg_clustersum(cgp, needswap);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
		end = ufs_rw32(cgp->cg_nclusterblks, needswap);
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	ufs_add32(sump[i], cnt, needswap);
	if (back > 0)
		ufs_add32(sump[back], -cnt, needswap);
	if (forw > 0)
		ufs_add32(sump[forw], -cnt, needswap);
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (ufs_rw32(*lp--, needswap) > 0)
			break;
	fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
}
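
/*
 * Worked example (hypothetical): freeing block b when the 2 blocks
 * before it and the 1 block after it are already free joins a cluster
 * of back = 2, a cluster of forw = 1 and b itself into one cluster of
 * length 4: sump[4] gains 1 while sump[2] and sump[1] each lose 1.
 * Allocating b (cnt == -1) applies exactly the inverse updates.
 */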