/*	$NetBSD: ffs_alloc.c,v 1.123 2009/04/25 08:32:32 sborrill Exp $	*/

/*-
 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ffs_alloc.c,v 1.123 2009/04/25 08:32:32 sborrill Exp $");

#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/fstrans.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <miscfs/specfs/specdev.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ufs/ufs_wapbl.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t, int);
static ino_t ffs_dirpref(struct inode *);
static daddr_t ffs_fragextend(struct inode *, int, daddr_t, int, int);
static void ffs_fserr(struct fs *, u_int, const char *);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int, int,
    daddr_t (*)(struct inode *, int, daddr_t, int, int));
static daddr_t ffs_nodealloccg(struct inode *, int, daddr_t, int, int);
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);
static void ffs_blkfree_common(struct ufsmount *, struct fs *, dev_t,
    struct buf *, daddr_t, long, bool);
static void ffs_freefile_common(struct ufsmount *, struct fs *, dev_t,
    struct buf *, ino_t, int, bool);

/* if 1, changes in optimization strategy are logged */
int ffs_log_changeopt = 0;

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];
/* Basic consistency check for block allocations */
int
ffs_check_bad_allocation(const char *func, struct fs *fs, daddr_t bno,
    long size, dev_t dev, ino_t inum)
{
    if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
        fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
        printf("dev = 0x%llx, bno = %" PRId64 " bsize = %d, "
            "size = %ld, fs = %s\n",
            (long long)dev, bno, fs->fs_bsize, size, fs->fs_fsmnt);
        panic("%s: bad size", func);
    }

    if (bno >= fs->fs_size) {
        printf("bad block %" PRId64 ", ino %llu\n", bno,
            (unsigned long long)inum);
        ffs_fserr(fs, inum, "bad block");
        return EINVAL;
    }
    return 0;
}
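/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the same size/offset sanity test as above, restated with plain integers
 * for a hypothetical 8 KB block / 1 KB fragment geometry.  The helpers
 * fragoff(), fragnum() and numfrags() are modelled directly with modular
 * arithmetic; the values are assumptions, not taken from a real superblock.
 */
static int
example_check_alloc(long size, long long bno)
{
    const long bsize = 8192;            /* stand-in for fs_bsize */
    const long fsize = 1024;            /* stand-in for fs_fsize */
    const int  frag  = (int)(bsize / fsize);    /* fragments per block: 8 */

    if (size <= 0 || size > bsize || size % fsize != 0)
        return -1;                      /* "bad size" */
    if ((bno % frag) + (size / fsize) > frag)
        return -1;                      /* request crosses a block boundary */
    return 0;
}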
/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 *
 * => called with um_lock held
 * => releases um_lock before returning
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size, int flags,
    kauth_cred_t cred, daddr_t *bnp)
{
    struct ufsmount *ump;

    KASSERT(mutex_owned(&ump->um_lock));

#ifdef UVM_PAGE_TRKOWN
    if (ITOV(ip)->v_type == VREG &&
        lblktosize(fs, (voff_t)lbn) < round_page(ITOV(ip)->v_size)) {
        struct uvm_object *uobj = &ITOV(ip)->v_uobj;
        voff_t off = trunc_page(lblktosize(fs, lbn));
        voff_t endoff = round_page(lblktosize(fs, lbn) + size);

        mutex_enter(&uobj->vmobjlock);
        while (off < endoff) {
            pg = uvm_pagelookup(uobj, off);
            KASSERT(pg->owner == curproc->p_pid);
        mutex_exit(&uobj->vmobjlock);
#endif

    if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
        printf("dev = 0x%llx, bsize = %d, size = %d, fs = %s\n",
            (unsigned long long)ip->i_dev, fs->fs_bsize, size,
        panic("ffs_alloc: bad size");
        panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
    if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
    if (freespace(fs, fs->fs_minfree) <= 0 &&
        kauth_authorize_system(cred, KAUTH_SYSTEM_FS_RESERVEDSPACE, 0, NULL,
    mutex_exit(&ump->um_lock);
    if ((error = chkdq(ip, btodb(size), cred, 0)) != 0)
    mutex_enter(&ump->um_lock);
    if (bpref >= fs->fs_size)
        cg = ino_to_cg(fs, ip->i_number);
        cg = dtog(fs, bpref);
    bno = ffs_hashalloc(ip, cg, bpref, size, flags, ffs_alloccg);
        DIP_ADD(ip, blocks, btodb(size));
        ip->i_flag |= IN_CHANGE | IN_UPDATE;
    /*
     * Restore user's disk quota because allocation failed.
     */
    (void) chkdq(ip, -btodb(size), cred, FORCE);
    if (flags & B_CONTIG) {
        /*
         * XXX ump->um_lock handling is "suspect" at best.
         * For the case where ffs_hashalloc() fails early
         * in the B_CONTIG case we reach here with um_lock
         * already unlocked, so we can't release it again
         * like in the normal error path.  See kern/39206.
         *
         * Fail silently - it's up to our caller to report
         */
    mutex_exit(&ump->um_lock);
    ffs_fserr(fs, kauth_cred_geteuid(cred), "file system full");
    uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 *
 * => called with um_lock held
 * => return with um_lock released
 */
int
ffs_realloccg(struct inode *ip, daddr_t lbprev, daddr_t bpref, int osize,
    int nsize, kauth_cred_t cred, struct buf **bpp, daddr_t *blknop)
{
    struct ufsmount *ump;
    int cg, request, error;

    KASSERT(mutex_owned(&ump->um_lock));

#ifdef UVM_PAGE_TRKOWN
    if (ITOV(ip)->v_type == VREG) {
        struct uvm_object *uobj = &ITOV(ip)->v_uobj;
        voff_t off = trunc_page(lblktosize(fs, lbprev));
        voff_t endoff = round_page(lblktosize(fs, lbprev) + osize);

        mutex_enter(&uobj->vmobjlock);
        while (off < endoff) {
            pg = uvm_pagelookup(uobj, off);
            KASSERT(pg->owner == curproc->p_pid);
            KASSERT((pg->flags & PG_CLEAN) == 0);
        mutex_exit(&uobj->vmobjlock);
#endif

    if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
        (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
            "dev = 0x%llx, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
            (unsigned long long)ip->i_dev, fs->fs_bsize, osize, nsize,
        panic("ffs_realloccg: bad size");
        panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
    if (freespace(fs, fs->fs_minfree) <= 0 &&
        kauth_authorize_system(cred, KAUTH_SYSTEM_FS_RESERVEDSPACE, 0, NULL,
        mutex_exit(&ump->um_lock);
    if (fs->fs_magic == FS_UFS2_MAGIC)
        bprev = ufs_rw64(ip->i_ffs2_db[lbprev], UFS_FSNEEDSWAP(fs));
        bprev = ufs_rw32(ip->i_ffs1_db[lbprev], UFS_FSNEEDSWAP(fs));
        printf("dev = 0x%llx, bsize = %d, bprev = %" PRId64 ", fs = %s\n",
            (unsigned long long)ip->i_dev, fs->fs_bsize, bprev,
        panic("ffs_realloccg: bad bprev");
    mutex_exit(&ump->um_lock);

    /*
     * Allocate the extra space in the buffer.
     */
        (error = bread(ITOV(ip), lbprev, osize, NOCRED, 0, &bp)) != 0) {
    if ((error = chkdq(ip, btodb(nsize - osize), cred, 0)) != 0) {
    /*
     * Check for extension in the existing location.
     */
    cg = dtog(fs, bprev);
    mutex_enter(&ump->um_lock);
    if ((bno = ffs_fragextend(ip, cg, bprev, osize, nsize)) != 0) {
        DIP_ADD(ip, blocks, btodb(nsize - osize));
        ip->i_flag |= IN_CHANGE | IN_UPDATE;
        if (bp->b_blkno != fsbtodb(fs, bno))
            panic("bad blockno");
        allocbuf(bp, nsize, 1);
        memset((char *)bp->b_data + osize, 0, nsize - osize);
        mutex_enter(bp->b_objlock);
        KASSERT(!cv_has_waiters(&bp->b_done));
        bp->b_oflags |= BO_DONE;
        mutex_exit(bp->b_objlock);
        if (blknop != NULL) {

    /*
     * Allocate a new disk location.
     */
    if (bpref >= fs->fs_size)
    switch ((int)fs->fs_optim) {
        /*
         * Allocate an exact sized fragment. Although this makes
         * best use of space, we will waste time relocating it if
         * the file continues to grow. If the fragmentation is
         * less than half of the minimum free reserve, we choose
         * to begin optimizing for time.
         */
        if (fs->fs_minfree < 5 ||
            fs->fs_cstotal.cs_nffree >
            fs->fs_dsize * fs->fs_minfree / (2 * 100))
        if (ffs_log_changeopt) {
                "%s: optimization changed from SPACE to TIME\n",
        fs->fs_optim = FS_OPTTIME;
        /*
         * At this point we have discovered a file that is trying to
         * grow a small fragment to a larger fragment. To save time,
         * we allocate a full sized block, then free the unused portion.
         * If the file continues to grow, the `ffs_fragextend' call
         * above will be able to grow it in place without further
         * copying. If aberrant programs cause disk fragmentation to
         * grow within 2% of the free reserve, we choose to begin
         * optimizing for space.
         */
        request = fs->fs_bsize;
        if (fs->fs_cstotal.cs_nffree <
            fs->fs_dsize * (fs->fs_minfree - 2) / 100)
        if (ffs_log_changeopt) {
                "%s: optimization changed from TIME to SPACE\n",
        fs->fs_optim = FS_OPTSPACE;
        printf("dev = 0x%llx, optim = %d, fs = %s\n",
            (unsigned long long)ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
        panic("ffs_realloccg: bad optim");
    bno = ffs_hashalloc(ip, cg, bpref, request, 0, ffs_alloccg);
        if ((ip->i_ump->um_mountp->mnt_wapbl) &&
            (ITOV(ip)->v_type != VREG)) {
            UFS_WAPBL_REGISTER_DEALLOCATION(
                ip->i_ump->um_mountp, fsbtodb(fs, bprev),
            ffs_blkfree(fs, ip->i_devvp, bprev, (long)osize,
        if (nsize < request) {
            if ((ip->i_ump->um_mountp->mnt_wapbl) &&
                (ITOV(ip)->v_type != VREG)) {
                UFS_WAPBL_REGISTER_DEALLOCATION(
                    ip->i_ump->um_mountp,
                    fsbtodb(fs, (bno + numfrags(fs, nsize))),
                ffs_blkfree(fs, ip->i_devvp,
                    bno + numfrags(fs, nsize),
                    (long)(request - nsize), ip->i_number);
        DIP_ADD(ip, blocks, btodb(nsize - osize));
        ip->i_flag |= IN_CHANGE | IN_UPDATE;
        bp->b_blkno = fsbtodb(fs, bno);
        allocbuf(bp, nsize, 1);
        memset((char *)bp->b_data + osize, 0, (u_int)nsize - osize);
        mutex_enter(bp->b_objlock);
        KASSERT(!cv_has_waiters(&bp->b_done));
        bp->b_oflags |= BO_DONE;
        mutex_exit(bp->b_objlock);
        if (blknop != NULL) {
    mutex_exit(&ump->um_lock);

    /*
     * Restore user's disk quota because allocation failed.
     */
    (void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
    ffs_fserr(fs, kauth_cred_geteuid(cred), "file system full");
    uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 *
 * => um_lock not held upon entry or return
 */
int
ffs_valloc(struct vnode *pvp, int mode, kauth_cred_t cred,
    struct vnode **vpp)
{
    struct ufsmount *ump;

    UFS_WAPBL_JUNLOCK_ASSERT(pvp->v_mount);

    error = UFS_WAPBL_BEGIN(pvp->v_mount);
    mutex_enter(&ump->um_lock);
    if (fs->fs_cstotal.cs_nifree == 0)
    if ((mode & IFMT) == IFDIR)
        ipref = ffs_dirpref(pip);
        ipref = pip->i_number;
    if (ipref >= fs->fs_ncg * fs->fs_ipg)
    cg = ino_to_cg(fs, ipref);
    /*
     * Track number of dirs created one after another
     * in a same cg without intervening by files.
     */
    if ((mode & IFMT) == IFDIR) {
        if (fs->fs_contigdirs[cg] < 255)
            fs->fs_contigdirs[cg]++;
        if (fs->fs_contigdirs[cg] > 0)
            fs->fs_contigdirs[cg]--;
    ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0, ffs_nodealloccg);
        UFS_WAPBL_END(pvp->v_mount);
    error = VFS_VGET(pvp->v_mount, ino, vpp);
        err = UFS_WAPBL_BEGIN(pvp->v_mount);
            ffs_vfree(pvp, ino, mode);
            UFS_WAPBL_END(pvp->v_mount);
    KASSERT((*vpp)->v_type == VNON);
        printf("mode = 0%o, inum = %d, fs = %s\n",
            ip->i_mode, ip->i_number, fs->fs_fsmnt);
        printf("dmode %x mode %x dgen %x gen %x\n",
            DIP(ip, mode), ip->i_mode,
            DIP(ip, gen), ip->i_gen);
        printf("size %llx blocks %llx\n",
            (long long)DIP(ip, size), (long long)DIP(ip, blocks));
        printf("ino %llu ipref %llu\n", (unsigned long long)ino,
            (unsigned long long)ipref);
        error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
            (int)fs->fs_bsize, NOCRED, 0, &bp);
        panic("ffs_valloc: dup alloc");
    if (DIP(ip, blocks)) {				/* XXX */
        printf("free inode %s/%llu had %" PRId64 " blocks\n",
            fs->fs_fsmnt, (unsigned long long)ino, DIP(ip, blocks));
        DIP_ASSIGN(ip, blocks, 0);
    ip->i_flag &= ~IN_SPACECOUNTED;
    DIP_ASSIGN(ip, flags, 0);
    /*
     * Set up a new generation number for this inode.
     */
    DIP_ASSIGN(ip, gen, ip->i_gen);
    if (fs->fs_magic == FS_UFS2_MAGIC) {
        ip->i_ffs2_birthtime = ts.tv_sec;
        ip->i_ffs2_birthnsec = ts.tv_nsec;
    mutex_exit(&ump->um_lock);
    UFS_WAPBL_END(pvp->v_mount);
    ffs_fserr(fs, kauth_cred_geteuid(cred), "out of inodes");
    uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
/*
 * Find a cylinder group in which to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(struct inode *pip)
{
    register struct fs *fs;
    int64_t dirsize, cgsize, curdsz;
    int avgifree, avgbfree, avgndir;
    int minifree, minbfree, maxndir;

    KASSERT(mutex_owned(&pip->i_ump->um_lock));

    avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
    avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
    avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

    /*
     * Force allocation in another cg if creating a first level dir.
     */
    if (ITOV(pip)->v_vflag & VV_ROOT) {
        prefcg = random() % fs->fs_ncg;
        minndir = fs->fs_ipg;
        for (cg = prefcg; cg < fs->fs_ncg; cg++)
            if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
                fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
                fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                minndir = fs->fs_cs(fs, cg).cs_ndir;
        for (cg = 0; cg < prefcg; cg++)
            if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
                fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
                fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                minndir = fs->fs_cs(fs, cg).cs_ndir;
        return ((ino_t)(fs->fs_ipg * mincg));

    /*
     * Count various limits which are used for
     * optimal allocation of a directory inode.
     */
    maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
    minifree = avgifree - fs->fs_ipg / 4;
    minbfree = avgbfree - fragstoblks(fs, fs->fs_fpg) / 4;
    cgsize = (int64_t)fs->fs_fsize * fs->fs_fpg;
    dirsize = (int64_t)fs->fs_avgfilesize * fs->fs_avgfpdir;
        curdsz = (cgsize - (int64_t)avgbfree * fs->fs_bsize) / avgndir;
        if (dirsize < curdsz)
    if (cgsize < dirsize * 255)
        maxcontigdirs = cgsize / dirsize;
    if (fs->fs_avgfpdir > 0)
        maxcontigdirs = min(maxcontigdirs,
            fs->fs_ipg / fs->fs_avgfpdir);
    if (maxcontigdirs == 0)

    /*
     * Limit number of dirs in one cg and reserve space for
     * regular files, but only if we have no deficit in
     * inodes or space.
     */
    prefcg = ino_to_cg(fs, pip->i_number);
    for (cg = prefcg; cg < fs->fs_ncg; cg++)
        if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
            fs->fs_cs(fs, cg).cs_nifree >= minifree &&
            fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
            if (fs->fs_contigdirs[cg] < maxcontigdirs)
                return ((ino_t)(fs->fs_ipg * cg));
    for (cg = 0; cg < prefcg; cg++)
        if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
            fs->fs_cs(fs, cg).cs_nifree >= minifree &&
            fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
            if (fs->fs_contigdirs[cg] < maxcontigdirs)
                return ((ino_t)(fs->fs_ipg * cg));
    /*
     * This is a backstop when we are deficient in space.
     */
    for (cg = prefcg; cg < fs->fs_ncg; cg++)
        if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
            return ((ino_t)(fs->fs_ipg * cg));
    for (cg = 0; cg < prefcg; cg++)
        if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
    return ((ino_t)(fs->fs_ipg * cg));
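/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the maxcontigdirs computation used above, in isolation.  The parameters
 * stand in for fs_fsize*fs_fpg, fs_avgfilesize*fs_avgfpdir, fs_ipg and
 * fs_avgfpdir; the cap of 255 mirrors the width of fs_contigdirs[].  This
 * is a reading of the code above, not a drop-in replacement for it.
 */
static int
example_maxcontigdirs(int64_t cgsize, int64_t dirsize, int ipg, int avgfpdir)
{
    int maxcontigdirs;

    if (cgsize < dirsize * 255)
        maxcontigdirs = (int)(cgsize / dirsize);
    else
        maxcontigdirs = 255;
    if (avgfpdir > 0 && maxcontigdirs > ipg / avgfpdir)
        maxcontigdirs = ipg / avgfpdir;     /* never more dirs than inodes allow */
    if (maxcontigdirs == 0)
        maxcontigdirs = 1;
    return maxcontigdirs;
}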
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is laid out
 * contiguously if possible.
 *
 * => um_lock held on entry and exit
 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int flags,
    int32_t *bap /* XXX ondisk32 */)
{
    int avgbfree, startcg;

    KASSERT(mutex_owned(&ip->i_ump->um_lock));

    /*
     * If allocating a contiguous file with B_CONTIG, use the hints
     * in the inode extensions to return the desired block.
     *
     * For metadata (indirect blocks) return the address of where
     * the first indirect block resides - we'll scan for the next
     * available slot if we need to allocate more than one indirect
     * block.  For data, return the address of the actual block
     * relative to the address of the first data block.
     */
    if (flags & B_CONTIG) {
        KASSERT(ip->i_ffs_first_data_blk != 0);
        KASSERT(ip->i_ffs_first_indir_blk != 0);
        if (flags & B_METAONLY)
            return ip->i_ffs_first_indir_blk;
            return ip->i_ffs_first_data_blk + blkstofrags(fs, lbn);

    if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
        if (lbn < NDADDR + NINDIR(fs)) {
            cg = ino_to_cg(fs, ip->i_number);
            return (cgbase(fs, cg) + fs->fs_frag);
        /*
         * Find a cylinder with greater than average number of
         * unused data blocks.
         */
        if (indx == 0 || bap[indx - 1] == 0)
                ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
        startcg %= fs->fs_ncg;
        avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
        for (cg = startcg; cg < fs->fs_ncg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                return (cgbase(fs, cg) + fs->fs_frag);
        for (cg = 0; cg < startcg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                return (cgbase(fs, cg) + fs->fs_frag);
    /*
     * We just always try to lay things out contiguously.
     */
    return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int flags,
    int64_t *bap)
{
    int avgbfree, startcg;

    KASSERT(mutex_owned(&ip->i_ump->um_lock));

    /*
     * If allocating a contiguous file with B_CONTIG, use the hints
     * in the inode extensions to return the desired block.
     *
     * For metadata (indirect blocks) return the address of where
     * the first indirect block resides - we'll scan for the next
     * available slot if we need to allocate more than one indirect
     * block.  For data, return the address of the actual block
     * relative to the address of the first data block.
     */
    if (flags & B_CONTIG) {
        KASSERT(ip->i_ffs_first_data_blk != 0);
        KASSERT(ip->i_ffs_first_indir_blk != 0);
        if (flags & B_METAONLY)
            return ip->i_ffs_first_indir_blk;
            return ip->i_ffs_first_data_blk + blkstofrags(fs, lbn);

    if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
        if (lbn < NDADDR + NINDIR(fs)) {
            cg = ino_to_cg(fs, ip->i_number);
            return (cgbase(fs, cg) + fs->fs_frag);
        /*
         * Find a cylinder with greater than average number of
         * unused data blocks.
         */
        if (indx == 0 || bap[indx - 1] == 0)
                ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
        startcg %= fs->fs_ncg;
        avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
        for (cg = startcg; cg < fs->fs_ncg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                return (cgbase(fs, cg) + fs->fs_frag);
        for (cg = 0; cg < startcg; cg++)
            if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                return (cgbase(fs, cg) + fs->fs_frag);
    /*
     * We just always try to lay things out contiguously.
     */
    return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
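/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the cylinder group sweep used above when a new file section is started.
 * Scan forward from startcg to the last group, then wrap to group 0, and
 * take the first group whose free block count is at least the per-group
 * average.  nbfree[] is a plain-array stand-in for fs_cs(fs, cg).cs_nbfree.
 */
static int
example_cg_sweep(const int *nbfree, int ncg, int startcg, int avgbfree)
{
    int cg;

    for (cg = startcg; cg < ncg; cg++)
        if (nbfree[cg] >= avgbfree)
            return cg;
    for (cg = 0; cg < startcg; cg++)
        if (nbfree[cg] >= avgbfree)
            return cg;
    return -1;          /* no group above average free space */
}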
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * => called with um_lock held
 * => returns with um_lock released on success, held on failure
 *    (*allocator releases lock on success, retains lock on failure)
 */
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref,
    int size /* size for data blocks, mode for inodes */,
    int flags, daddr_t (*allocator)(struct inode *, int, daddr_t, int, int))
{
    /*
     * 1: preferred cylinder group
     */
    result = (*allocator)(ip, cg, pref, size, flags);
    if (flags & B_CONTIG)
    /*
     * 2: quadratic rehash
     */
    for (i = 1; i < fs->fs_ncg; i *= 2) {
        if (cg >= fs->fs_ncg)
        result = (*allocator)(ip, cg, 0, size, flags);
    /*
     * 3: brute force search
     * Note that we start at i == 2, since 0 was checked initially,
     * and 1 is always checked in the quadratic rehash.
     */
    cg = (icg + 2) % fs->fs_ncg;
    for (i = 2; i < fs->fs_ncg; i++) {
        result = (*allocator)(ip, cg, 0, size, flags);
        if (cg == fs->fs_ncg)
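/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the probe order produced by the quadratic rehash above.  Starting from
 * the preferred group icg, the offsets added are 1, 2, 4, 8, ... modulo
 * ncg; if every probe fails, the caller falls back to a linear scan
 * starting at icg + 2.  The output array and maxout bound are assumptions
 * made for the sake of the example.
 */
static int
example_rehash_probes(int icg, int ncg, int *out, int maxout)
{
    int i, cg = icg, n = 0;

    for (i = 1; i < ncg && n < maxout; i *= 2) {
        cg += i;
        if (cg >= ncg)
            cg -= ncg;          /* wrap around the last group */
        out[n++] = cg;
    }
    return n;                   /* groups probed before brute force */
}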
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 *
 * => called with um_lock held
 * => returns with um_lock released on success, held on failure
 */
static daddr_t
ffs_fragextend(struct inode *ip, int cg, daddr_t bprev, int osize, int nsize)
{
    struct ufsmount *ump;

    KASSERT(mutex_owned(&ump->um_lock));

    if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
    frags = numfrags(fs, nsize);
    bbase = fragnum(fs, bprev);
    if (bbase > fragnum(fs, (bprev + frags - 1))) {
        /* cannot extend across a block boundary */
    mutex_exit(&ump->um_lock);
    error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs)))
    cgp->cg_old_time = ufs_rw32(time_second, UFS_FSNEEDSWAP(fs));
    if ((fs->fs_magic != FS_UFS1_MAGIC) ||
        (fs->fs_old_flags & FS_FLAGS_UPDATED))
        cgp->cg_time = ufs_rw64(time_second, UFS_FSNEEDSWAP(fs));
    bno = dtogd(fs, bprev);
    blksfree = cg_blksfree(cgp, UFS_FSNEEDSWAP(fs));
    for (i = numfrags(fs, osize); i < frags; i++)
        if (isclr(blksfree, bno + i))
    /*
     * the current fragment can be extended
     * deduct the count on fragment being extended into
     * increase the count on the remaining fragment (if any)
     * allocate the extended piece
     */
    for (i = frags; i < fs->fs_frag - bbase; i++)
        if (isclr(blksfree, bno + i))
    ufs_add32(cgp->cg_frsum[i - numfrags(fs, osize)], -1, UFS_FSNEEDSWAP(fs));
        ufs_add32(cgp->cg_frsum[i - frags], 1, UFS_FSNEEDSWAP(fs));
    mutex_enter(&ump->um_lock);
    for (i = numfrags(fs, osize); i < frags; i++) {
        clrbit(blksfree, bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -1, UFS_FSNEEDSWAP(fs));
        fs->fs_cstotal.cs_nffree--;
        fs->fs_cs(fs, cg).cs_nffree--;
    ACTIVECG_CLR(fs, cg);
    mutex_exit(&ump->um_lock);

    mutex_enter(&ump->um_lock);
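/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the block-boundary test used above.  A fragment run can only be extended
 * while it stays inside its own block, i.e. the extension must not wrap
 * past the end of the block that bprev lives in.  "frag" stands in for
 * fs_frag (fragments per block); nfrags is the new size in fragments.
 */
static int
example_can_extend(long bprev, int nfrags, int frag)
{
    /* nonzero when the extended run still fits inside bprev's block */
    return (bprev % frag) + nfrags <= frag;
}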
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size, int flags)
{
    struct ufsmount *ump;
    struct fs *fs = ip->i_fs;
    int error, frags, allocsiz, i;
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(mutex_owned(&ump->um_lock));

    if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
    mutex_exit(&ump->um_lock);
    error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap) ||
        (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
    cgp->cg_old_time = ufs_rw32(time_second, needswap);
    if ((fs->fs_magic != FS_UFS1_MAGIC) ||
        (fs->fs_old_flags & FS_FLAGS_UPDATED))
        cgp->cg_time = ufs_rw64(time_second, needswap);
    if (size == fs->fs_bsize) {
        mutex_enter(&ump->um_lock);
        blkno = ffs_alloccgblk(ip, bp, bpref, flags);
        ACTIVECG_CLR(fs, cg);
        mutex_exit(&ump->um_lock);
    /*
     * check to see if any fragments are already available
     * allocsiz is the size which will be allocated, hacking
     * it down to a smaller size if necessary
     */
    blksfree = cg_blksfree(cgp, needswap);
    frags = numfrags(fs, size);
    for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
        if (cgp->cg_frsum[allocsiz] != 0)
    if (allocsiz == fs->fs_frag) {
        /*
         * no fragments were available, so a block will be
         * allocated, and hacked up
         */
        if (cgp->cg_cs.cs_nbfree == 0)
        mutex_enter(&ump->um_lock);
        blkno = ffs_alloccgblk(ip, bp, bpref, flags);
        bno = dtogd(fs, blkno);
        for (i = frags; i < fs->fs_frag; i++)
            setbit(blksfree, bno + i);
        i = fs->fs_frag - frags;
        ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        ufs_add32(cgp->cg_frsum[i], 1, needswap);
        ACTIVECG_CLR(fs, cg);
        mutex_exit(&ump->um_lock);
    bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
    /*
     * XXX fvdl mapsearch will panic, and never return -1
     *          also: returning NULL as daddr_t ?
     */
    for (i = 0; i < frags; i++)
        clrbit(blksfree, bno + i);
    mutex_enter(&ump->um_lock);
    ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
    fs->fs_cstotal.cs_nffree -= frags;
    fs->fs_cs(fs, cg).cs_nffree -= frags;
    ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
    if (frags != allocsiz)
        ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
    blkno = cgbase(fs, cg) + bno;
    ACTIVECG_CLR(fs, cg);
    mutex_exit(&ump->um_lock);

    mutex_enter(&ump->um_lock);
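/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the frsum[] scan performed above.  frsum[k] counts free runs of exactly
 * k fragments in the group; the allocator picks the smallest run length
 * that can hold the request, and falls back to breaking up a whole block
 * when no fragment run fits.  frag stands in for fs_frag.
 */
static int
example_pick_allocsiz(const int *frsum, int frag, int frags)
{
    int allocsiz;

    for (allocsiz = frags; allocsiz < frag; allocsiz++)
        if (frsum[allocsiz] != 0)
            return allocsiz;    /* smallest sufficient run length */
    return frag;                /* nothing fits: split a full block */
}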
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref, int flags)
{
    struct ufsmount *ump;
    struct fs *fs = ip->i_fs;
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(mutex_owned(&ump->um_lock));

    cgp = (struct cg *)bp->b_data;
    blksfree = cg_blksfree(cgp, needswap);
    if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
        bpref = ufs_rw32(cgp->cg_rotor, needswap);
        bpref = blknum(fs, bpref);
        bno = dtogd(fs, bpref);
        /*
         * if the requested block is available, use it
         */
        if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
        /*
         * if the requested data block isn't available and we are
         * trying to allocate a contiguous file, return an error.
         */
        if ((flags & (B_CONTIG | B_METAONLY)) == B_CONTIG)
    /*
     * Take the next available block in this cylinder group.
     */
    bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
    cgp->cg_rotor = ufs_rw32(bno, needswap);
    blkno = fragstoblks(fs, bno);
    ffs_clrblock(fs, blksfree, blkno);
    ffs_clusteracct(fs, cgp, blkno, -1);
    ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
    fs->fs_cstotal.cs_nbfree--;
    fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
    if ((fs->fs_magic == FS_UFS1_MAGIC) &&
        ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
        cylno = old_cbtocylno(fs, bno);
        KASSERT(cylno >= 0);
        KASSERT(cylno < fs->fs_old_ncyl);
        KASSERT(old_cbtorpos(fs, bno) >= 0);
        KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, bno) < fs->fs_old_nrpos);
        ufs_add16(old_cg_blks(fs, cgp, cylno, needswap)[old_cbtorpos(fs, bno)], -1,
        ufs_add32(old_cg_blktot(cgp, needswap)[cylno], -1, needswap);
    cg = ufs_rw32(cgp->cg_cgx, needswap);
    blkno = cgbase(fs, cg) + bno;
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static daddr_t
ffs_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode, int flags)
{
    struct ufsmount *ump = ip->i_ump;
    struct fs *fs = ip->i_fs;
    struct buf *bp, *ibp;
    int error, start, len, loc, map, i;
    struct ufs2_dinode *dp2;
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(mutex_owned(&ump->um_lock));
    UFS_WAPBL_JLOCK_ASSERT(ip->i_ump->um_mountp);

    if (fs->fs_cs(fs, cg).cs_nifree == 0)
    mutex_exit(&ump->um_lock);
    error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap) || cgp->cg_cs.cs_nifree == 0)
        initediblk != ufs_rw32(cgp->cg_initediblk, needswap)) {
        /* Another thread allocated more inodes so we retry the test. */
    /*
     * Check to see if we need to initialize more inodes.
     */
    if (fs->fs_magic == FS_UFS2_MAGIC && ibp == NULL) {
        initediblk = ufs_rw32(cgp->cg_initediblk, needswap);
        nalloc = fs->fs_ipg - ufs_rw32(cgp->cg_cs.cs_nifree, needswap);
        if (nalloc + INOPB(fs) > initediblk &&
            initediblk < ufs_rw32(cgp->cg_niblk, needswap)) {
            /*
             * We have to release the cg buffer here to prevent
             * a deadlock when reading the inode block will
             * run a copy-on-write that might use this cg.
             */
            error = ffs_getblk(ip->i_devvp, fsbtodb(fs,
                ino_to_fsba(fs, cg * fs->fs_ipg + initediblk)),
                FFS_NOBLK, fs->fs_bsize, false, &ibp);
    cgp->cg_old_time = ufs_rw32(time_second, needswap);
    if ((fs->fs_magic != FS_UFS1_MAGIC) ||
        (fs->fs_old_flags & FS_FLAGS_UPDATED))
        cgp->cg_time = ufs_rw64(time_second, needswap);
    inosused = cg_inosused(cgp, needswap);
        ipref %= fs->fs_ipg;
        if (isclr(inosused, ipref))
    start = ufs_rw32(cgp->cg_irotor, needswap) / NBBY;
    len = howmany(fs->fs_ipg - ufs_rw32(cgp->cg_irotor, needswap),
    loc = skpc(0xff, len, &inosused[start]);
        loc = skpc(0xff, len, &inosused[0]);
            printf("cg = %d, irotor = %d, fs = %s\n",
                cg, ufs_rw32(cgp->cg_irotor, needswap),
            panic("ffs_nodealloccg: map corrupted");
    i = start + len - loc;
    for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
        if ((map & i) == 0) {
            cgp->cg_irotor = ufs_rw32(ipref, needswap);
    printf("fs = %s\n", fs->fs_fsmnt);
    panic("ffs_nodealloccg: block not in map");
    UFS_WAPBL_REGISTER_INODE(ip->i_ump->um_mountp, cg * fs->fs_ipg + ipref,
    /*
     * Check to see if we need to initialize more inodes.
     */
        KASSERT(initediblk == ufs_rw32(cgp->cg_initediblk, needswap));
        memset(ibp->b_data, 0, fs->fs_bsize);
        dp2 = (struct ufs2_dinode *)(ibp->b_data);
        for (i = 0; i < INOPB(fs); i++) {
            /*
             * Don't bother to swap, it's supposed to be
             * random, after all.
             */
            dp2->di_gen = (arc4random() & INT32_MAX) / 2 + 1;
        initediblk += INOPB(fs);
        cgp->cg_initediblk = ufs_rw32(initediblk, needswap);
    mutex_enter(&ump->um_lock);
    ACTIVECG_CLR(fs, cg);
    setbit(inosused, ipref);
    ufs_add32(cgp->cg_cs.cs_nifree, -1, needswap);
    fs->fs_cstotal.cs_nifree--;
    fs->fs_cs(fs, cg).cs_nifree--;
    if ((mode & IFMT) == IFDIR) {
        ufs_add32(cgp->cg_cs.cs_ndir, 1, needswap);
        fs->fs_cstotal.cs_ndir++;
        fs->fs_cs(fs, cg).cs_ndir++;
    mutex_exit(&ump->um_lock);
    return (cg * fs->fs_ipg + ipref);

    mutex_enter(&ump->um_lock);
/*
 * Allocate a block or fragment.
 *
 * The specified block or fragment is removed from the
 * free map, possibly fragmenting a block in the process.
 *
 * This implementation should mirror fs_blkfree
 *
 * => um_lock not held on entry or exit
 */
int
ffs_blkalloc(struct inode *ip, daddr_t bno, long size)
{

    error = ffs_check_bad_allocation(__func__, ip->i_fs, bno, size,
        ip->i_dev, ip->i_uid);

    return ffs_blkalloc_ump(ip->i_ump, bno, size);
}
int
ffs_blkalloc_ump(struct ufsmount *ump, daddr_t bno, long size)
{
    struct fs *fs = ump->um_fs;
    int32_t fragno, cgbno;
    int i, error, cg, blk, frags, bbase;
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT((u_int)size <= fs->fs_bsize && fragoff(fs, size) == 0 &&
        fragnum(fs, bno) + numfrags(fs, size) <= fs->fs_frag);
    KASSERT(bno < fs->fs_size);

    error = bread(ump->um_devvp, fsbtodb(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {
    cgp->cg_old_time = ufs_rw32(time_second, needswap);
    cgp->cg_time = ufs_rw64(time_second, needswap);
    cgbno = dtogd(fs, bno);
    blksfree = cg_blksfree(cgp, needswap);

    mutex_enter(&ump->um_lock);
    if (size == fs->fs_bsize) {
        fragno = fragstoblks(fs, cgbno);
        if (!ffs_isblock(fs, blksfree, fragno)) {
            mutex_exit(&ump->um_lock);
        ffs_clrblock(fs, blksfree, fragno);
        ffs_clusteracct(fs, cgp, fragno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, cg).cs_nbfree--;
        bbase = cgbno - fragnum(fs, cgbno);
        frags = numfrags(fs, size);
        for (i = 0; i < frags; i++) {
            if (isclr(blksfree, cgbno + i)) {
                mutex_exit(&ump->um_lock);
        /*
         * if a complete block is being split, account for it
         */
        fragno = fragstoblks(fs, bbase);
        if (ffs_isblock(fs, blksfree, fragno)) {
            ufs_add32(cgp->cg_cs.cs_nffree, fs->fs_frag, needswap);
            fs->fs_cstotal.cs_nffree += fs->fs_frag;
            fs->fs_cs(fs, cg).cs_nffree += fs->fs_frag;
            ffs_clusteracct(fs, cgp, fragno, -1);
            ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
            fs->fs_cstotal.cs_nbfree--;
            fs->fs_cs(fs, cg).cs_nbfree--;
        /*
         * decrement the counts associated with the old frags
         */
        blk = blkmap(fs, blksfree, bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
        /*
         * allocate the fragment
         */
        for (i = 0; i < frags; i++) {
            clrbit(blksfree, cgbno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -i, needswap);
        fs->fs_cstotal.cs_nffree -= i;
        fs->fs_cs(fs, cg).cs_nffree -= i;
        /*
         * add back in counts associated with the new frags
         */
        blk = blkmap(fs, blksfree, bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
    ACTIVECG_CLR(fs, cg);
    mutex_exit(&ump->um_lock);
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 *
 * => um_lock not held on entry or exit
 */
void
ffs_blkfree(struct fs *fs, struct vnode *devvp, daddr_t bno, long size,
    ino_t inum)
{
    struct ufsmount *ump;
    const bool devvp_is_snapshot = (devvp->v_type != VBLK);
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(!devvp_is_snapshot);

    dev = devvp->v_rdev;
    ump = VFSTOUFS(devvp->v_specmountpoint);
    KASSERT(fs == ump->um_fs);
    cgblkno = fsbtodb(fs, cgtod(fs, cg));
    if (ffs_snapblkfree(fs, devvp, bno, size, inum))

    error = ffs_check_bad_allocation(__func__, fs, bno, size, dev, inum);

    error = bread(devvp, cgblkno, (int)fs->fs_cgsize,
        NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {

    ffs_blkfree_common(ump, fs, dev, bp, bno, size, devvp_is_snapshot);
/*
 * Free a block or fragment from a snapshot cg copy.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 *
 * => um_lock not held on entry or exit
 */
void
ffs_blkfree_snap(struct fs *fs, struct vnode *devvp, daddr_t bno, long size,
    ino_t inum)
{
    struct ufsmount *ump;
    const bool devvp_is_snapshot = (devvp->v_type != VBLK);
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(devvp_is_snapshot);

    dev = VTOI(devvp)->i_devvp->v_rdev;
    ump = VFSTOUFS(devvp->v_mount);
    cgblkno = fragstoblks(fs, cgtod(fs, cg));

    error = ffs_check_bad_allocation(__func__, fs, bno, size, dev, inum);

    error = bread(devvp, cgblkno, (int)fs->fs_cgsize,
        NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {

    ffs_blkfree_common(ump, fs, dev, bp, bno, size, devvp_is_snapshot);
static void
ffs_blkfree_common(struct ufsmount *ump, struct fs *fs, dev_t dev,
    struct buf *bp, daddr_t bno, long size, bool devvp_is_snapshot)
{
    int32_t fragno, cgbno;
    int i, cg, blk, frags, bbase;
    const int needswap = UFS_FSNEEDSWAP(fs);

    cgp = (struct cg *)bp->b_data;
    cgp->cg_old_time = ufs_rw32(time_second, needswap);
    if ((fs->fs_magic != FS_UFS1_MAGIC) ||
        (fs->fs_old_flags & FS_FLAGS_UPDATED))
        cgp->cg_time = ufs_rw64(time_second, needswap);
    cgbno = dtogd(fs, bno);
    blksfree = cg_blksfree(cgp, needswap);
    mutex_enter(&ump->um_lock);
    if (size == fs->fs_bsize) {
        fragno = fragstoblks(fs, cgbno);
        if (!ffs_isfreeblock(fs, blksfree, fragno)) {
            if (devvp_is_snapshot) {
                mutex_exit(&ump->um_lock);
            printf("dev = 0x%llx, block = %" PRId64 ", fs = %s\n",
                (unsigned long long)dev, bno, fs->fs_fsmnt);
            panic("blkfree: freeing free block");
        ffs_setblock(fs, blksfree, fragno);
        ffs_clusteracct(fs, cgp, fragno, 1);
        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
        fs->fs_cstotal.cs_nbfree++;
        fs->fs_cs(fs, cg).cs_nbfree++;
        if ((fs->fs_magic == FS_UFS1_MAGIC) &&
            ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
            i = old_cbtocylno(fs, cgbno);
            KASSERT(i < fs->fs_old_ncyl);
            KASSERT(old_cbtorpos(fs, cgbno) >= 0);
            KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, cgbno) < fs->fs_old_nrpos);
            ufs_add16(old_cg_blks(fs, cgp, i, needswap)[old_cbtorpos(fs, cgbno)], 1,
            ufs_add32(old_cg_blktot(cgp, needswap)[i], 1, needswap);
        bbase = cgbno - fragnum(fs, cgbno);
        /*
         * decrement the counts associated with the old frags
         */
        blk = blkmap(fs, blksfree, bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
        /*
         * deallocate the fragment
         */
        frags = numfrags(fs, size);
        for (i = 0; i < frags; i++) {
            if (isset(blksfree, cgbno + i)) {
                printf("dev = 0x%llx, block = %" PRId64
                    (unsigned long long)dev, bno + i,
                panic("blkfree: freeing free frag");
            setbit(blksfree, cgbno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        /*
         * add back in counts associated with the new frags
         */
        blk = blkmap(fs, blksfree, bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
        /*
         * if a complete block has been reassembled, account for it
         */
        fragno = fragstoblks(fs, bbase);
        if (ffs_isblock(fs, blksfree, fragno)) {
            ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
            fs->fs_cstotal.cs_nffree -= fs->fs_frag;
            fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
            ffs_clusteracct(fs, cgp, fragno, 1);
            ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
            fs->fs_cstotal.cs_nbfree++;
            fs->fs_cs(fs, cg).cs_nbfree++;
            if ((fs->fs_magic == FS_UFS1_MAGIC) &&
                ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
                i = old_cbtocylno(fs, bbase);
                KASSERT(i < fs->fs_old_ncyl);
                KASSERT(old_cbtorpos(fs, bbase) >= 0);
                KASSERT(fs->fs_old_nrpos == 0 || old_cbtorpos(fs, bbase) < fs->fs_old_nrpos);
                ufs_add16(old_cg_blks(fs, cgp, i, needswap)[old_cbtorpos(fs,
                    bbase)], 1, needswap);
                ufs_add32(old_cg_blktot(cgp, needswap)[i], 1, needswap);
    ACTIVECG_CLR(fs, cg);
    mutex_exit(&ump->um_lock);
int
ffs_vfree(struct vnode *vp, ino_t ino, int mode)
{

    return ffs_freefile(vp->v_mount, ino, mode);
}
/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 *
 * => um_lock not held on entry or exit
 */
int
ffs_freefile(struct mount *mp, ino_t ino, int mode)
{
    struct ufsmount *ump = VFSTOUFS(mp);
    struct fs *fs = ump->um_fs;
    struct vnode *devvp;
    const int needswap = UFS_FSNEEDSWAP(fs);

    cg = ino_to_cg(fs, ino);
    devvp = ump->um_devvp;
    dev = devvp->v_rdev;
    cgbno = fsbtodb(fs, cgtod(fs, cg));

    if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
        panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
            (long long)dev, (unsigned long long)ino, fs->fs_fsmnt);
    error = bread(devvp, cgbno, (int)fs->fs_cgsize,
        NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {

    ffs_freefile_common(ump, fs, dev, bp, ino, mode, false);
int
ffs_freefile_snap(struct fs *fs, struct vnode *devvp, ino_t ino, int mode)
{
    struct ufsmount *ump;
    const int needswap = UFS_FSNEEDSWAP(fs);

    KASSERT(devvp->v_type != VBLK);

    cg = ino_to_cg(fs, ino);
    dev = VTOI(devvp)->i_devvp->v_rdev;
    ump = VFSTOUFS(devvp->v_mount);
    cgbno = fragstoblks(fs, cgtod(fs, cg));
    if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
        panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
            (unsigned long long)dev, (unsigned long long)ino,
    error = bread(devvp, cgbno, (int)fs->fs_cgsize,
        NOCRED, B_MODIFY, &bp);
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {

    ffs_freefile_common(ump, fs, dev, bp, ino, mode, true);
static void
ffs_freefile_common(struct ufsmount *ump, struct fs *fs, dev_t dev,
    struct buf *bp, ino_t ino, int mode, bool devvp_is_snapshot)
{
    const int needswap = UFS_FSNEEDSWAP(fs);

    cg = ino_to_cg(fs, ino);
    cgp = (struct cg *)bp->b_data;
    cgp->cg_old_time = ufs_rw32(time_second, needswap);
    if ((fs->fs_magic != FS_UFS1_MAGIC) ||
        (fs->fs_old_flags & FS_FLAGS_UPDATED))
        cgp->cg_time = ufs_rw64(time_second, needswap);
    inosused = cg_inosused(cgp, needswap);

    if (isclr(inosused, ino)) {
        printf("ifree: dev = 0x%llx, ino = %llu, fs = %s\n",
            (unsigned long long)dev, (unsigned long long)ino +
            cg * fs->fs_ipg, fs->fs_fsmnt);
        if (fs->fs_ronly == 0)
            panic("ifree: freeing free inode");
    clrbit(inosused, ino);
    if (!devvp_is_snapshot)
        UFS_WAPBL_UNREGISTER_INODE(ump->um_mountp,
            ino + cg * fs->fs_ipg, mode);
    if (ino < ufs_rw32(cgp->cg_irotor, needswap))
        cgp->cg_irotor = ufs_rw32(ino, needswap);
    ufs_add32(cgp->cg_cs.cs_nifree, 1, needswap);
    mutex_enter(&ump->um_lock);
    fs->fs_cstotal.cs_nifree++;
    fs->fs_cs(fs, cg).cs_nifree++;
    if ((mode & IFMT) == IFDIR) {
        ufs_add32(cgp->cg_cs.cs_ndir, -1, needswap);
        fs->fs_cstotal.cs_ndir--;
        fs->fs_cs(fs, cg).cs_ndir--;
    ACTIVECG_CLR(fs, cg);
    mutex_exit(&ump->um_lock);
/*
 * Check to see if a file is free.
 */
int
ffs_checkfreefile(struct fs *fs, struct vnode *devvp, ino_t ino)
{
    const bool devvp_is_snapshot = (devvp->v_type != VBLK);

    KASSERT(devvp_is_snapshot);

    cg = ino_to_cg(fs, ino);
    if (devvp_is_snapshot)
        cgbno = fragstoblks(fs, cgtod(fs, cg));
        cgbno = fsbtodb(fs, cgtod(fs, cg));
    if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
    if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, 0, &bp)) {
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs))) {
    inosused = cg_inosused(cgp, UFS_FSNEEDSWAP(fs));
    ret = isclr(inosused, ino);
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
    int start, len, loc, i;
    int blk, field, subfield, pos;
    const int needswap = UFS_FSNEEDSWAP(fs);

    /* KASSERT(mutex_owned(&ump->um_lock)); */

    /*
     * find the fragment by searching through the free block
     * map for an appropriate bit pattern
     */
        start = dtogd(fs, bpref) / NBBY;
        start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
    blksfree = cg_blksfree(cgp, needswap);
    len = howmany(fs->fs_fpg, NBBY) - start;
    loc = scanc((u_int)len,
        (const u_char *)&blksfree[start],
        (const u_char *)fragtbl[fs->fs_frag],
        (1 << (allocsiz - 1 + (fs->fs_frag & (NBBY - 1)))));
        loc = scanc((u_int)len,
            (const u_char *)&blksfree[0],
            (const u_char *)fragtbl[fs->fs_frag],
            (1 << (allocsiz - 1 + (fs->fs_frag & (NBBY - 1)))));
            printf("start = %d, len = %d, fs = %s\n",
                ostart, olen, fs->fs_fsmnt);
            printf("offset=%d %ld\n",
                ufs_rw32(cgp->cg_freeoff, needswap),
                (long)blksfree - (long)cgp);
            printf("cg %d\n", cgp->cg_cgx);
            panic("ffs_alloccg: map corrupted");
    bno = (start + len - loc) * NBBY;
    cgp->cg_frotor = ufs_rw32(bno, needswap);
    /*
     * found the byte in the map
     * sift through the bits to find the selected frag
     */
    for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
        blk = blkmap(fs, blksfree, bno);
        field = around[allocsiz];
        subfield = inside[allocsiz];
        for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
            if ((blk & field) == subfield)
    printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
    panic("ffs_alloccg: block not in map");
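/*
 * Illustrative sketch (editor addition, not part of the original source):
 * what the around[]/inside[] table lookup above accomplishes - locating a
 * run of "allocsiz" contiguous free fragments inside a single block.  Here
 * the free map for one block is passed as a plain bit mask (bit i set means
 * fragment i is free) and the run is found directly instead of with the
 * precomputed mask tables; frag stands in for fs_frag.
 */
static int
example_find_run(unsigned int freemask, int frag, int allocsiz)
{
    int pos, run = 0;

    for (pos = 0; pos < frag; pos++) {
        if (freemask & (1u << pos)) {
            if (++run == allocsiz)
                return pos - allocsiz + 1;  /* first fragment of the run */
        } else
            run = 0;
    }
    return -1;          /* no fit in this block */
}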
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(struct fs *fs, struct cg *cgp, int32_t blkno, int cnt)
{
    u_char *freemapp, *mapp;
    int i, start, end, forw, back, map, bit;
    const int needswap = UFS_FSNEEDSWAP(fs);

    /* KASSERT(mutex_owned(&ump->um_lock)); */

    if (fs->fs_contigsumsize <= 0)
    freemapp = cg_clustersfree(cgp, needswap);
    sump = cg_clustersum(cgp, needswap);
    /*
     * Allocate or clear the actual block.
     */
        setbit(freemapp, blkno);
        clrbit(freemapp, blkno);
    /*
     * Find the size of the cluster going forward.
     */
    end = start + fs->fs_contigsumsize;
    if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap))
        end = ufs_rw32(cgp->cg_nclusterblks, needswap);
    mapp = &freemapp[start / NBBY];
    bit = 1 << (start % NBBY);
    for (i = start; i < end; i++) {
        if ((map & bit) == 0)
        if ((i & (NBBY - 1)) != (NBBY - 1)) {
    /*
     * Find the size of the cluster going backward.
     */
    end = start - fs->fs_contigsumsize;
    mapp = &freemapp[start / NBBY];
    bit = 1 << (start % NBBY);
    for (i = start; i > end; i--) {
        if ((map & bit) == 0)
        if ((i & (NBBY - 1)) != 0) {
            bit = 1 << (NBBY - 1);
    /*
     * Account for old cluster and the possibly new forward and
     * back clusters.
     */
    i = back + forw + 1;
    if (i > fs->fs_contigsumsize)
        i = fs->fs_contigsumsize;
    ufs_add32(sump[i], cnt, needswap);
        ufs_add32(sump[back], -cnt, needswap);
        ufs_add32(sump[forw], -cnt, needswap);
    /*
     * Update cluster summary information.
     */
    lp = &sump[fs->fs_contigsumsize];
    for (i = fs->fs_contigsumsize; i > 0; i--)
        if (ufs_rw32(*lp--, needswap) > 0)
    fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i;
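/*
 * Illustrative sketch (editor addition, not part of the original source):
 * the summary bookkeeping done above, with the bitmap scanning abstracted
 * away.  "back" and "forw" are the lengths of the free runs adjacent to the
 * block being toggled, "cnt" is +1 for a free and -1 for an allocation, and
 * sump[] counts clusters by length, capped at maxcontig (fs_contigsumsize).
 */
static void
example_clusteracct(int32_t *sump, int maxcontig, int back, int forw, int cnt)
{
    int i;

    i = back + forw + 1;        /* cluster that now includes this block */
    if (i > maxcontig)
        i = maxcontig;
    sump[i] += cnt;
    if (back > 0)
        sump[back] -= cnt;      /* old cluster behind the block */
    if (forw > 0)
        sump[forw] -= cnt;      /* old cluster ahead of the block */
}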
/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(struct fs *fs, u_int uid, const char *cp)
{

    log(LOG_ERR, "uid %d, pid %d, command %s, on %s: %s\n",
        uid, curproc->p_pid, curproc->p_comm, fs->fs_fsmnt, cp);
}