/*	$NetBSD: lfs_bio.c,v 1.115 2009/12/07 04:12:10 eeh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.115 2009/12/07 04:12:10 eeh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>
/*
 * LFS block write function.
 *
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count	= 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes	= 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages	= 0L;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip		= 0;	/* # of pages to trip per-fs write */
int	lfs_writing		= 0;	/* Set if already kicked off a writer
					   because of buffer space */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;

extern int lfs_dostats;
/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);
int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	int error;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	mutex_enter(&lfs_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		lfs_flush(fs, 0, 0);

		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}
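
/*
 * Illustrative use of lfs_reservebuf (hypothetical counts; not a call
 * site in this file): a caller about to dirty up to 4 buffers totalling
 * 32 KB reserves the headroom first, and releases it by calling again
 * with negated arguments:
 *
 *	error = lfs_reservebuf(fs, vp, NULL, 4, 32768);
 *	if (error)
 *		return error;
 *	...dirty the buffers...
 *	lfs_reservebuf(fs, vp, NULL, -4, -32768);
 */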
/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);

		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep very long time.
		 *
		 * XXX since we'll sleep for cleaner with vnode lock holding,
		 * deadlock will occur if cleaner tries to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
				0, &lfs_lock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}

	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}
#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);

	/* Make sure we're not in the process of reclaiming vp2 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_flags & LFS_UNDIROP) {
		mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
		    &lfs_lock);
	}
	mutex_exit(&lfs_lock);

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vref(vp);
	if (vp2 != NULL)
		vref(vp2);

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess.  should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	vrele(vp);
	if (vp2 != NULL)
		vrele(vp2);

	return error;
}
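
/*
 * Usage sketch (hypothetical block count): a vnodeop that may dirty up
 * to "nfsb" file-system blocks brackets the work with a reserve/release
 * pair, passing a negative count to release:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, nfsb)) != 0)
 *		return error;
 *	...operation that dirties blocks...
 *	lfs_reserve(fs, vp, NULL, -nfsb);
 */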
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}
/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int64_t needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
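
/*
 * Worked example of the computation above (all figures hypothetical):
 * with 8 KB blocks and 1 KB fragments, lfs_blktodb - lfs_fsbtodb = 3,
 * i.e. one block is 8 fsb.  Taking lfs_sumsize = 8192 (8 fsb),
 * INOPB(fs) = 64 with lfs_uinodes = 100 (2 inode blocks), and
 * lfs_segtabsz = 3, a request for fsb = 16 needs
 *
 *	needed = 16 + 8 + ((2 + 3 + 1) << 3) = 72 fsb,
 *
 * which fits only if lfs_avail exceeds 72.
 */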
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp = bp->b_vp;
	int fsb;

	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}
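
/*
 * Illustrative callers (not in this file): ordinary delayed writes
 * arrive through lfs_bwrite() above as lfs_bwrite_ext(bp, 0), while
 * the cleaner writes its buffers with
 *
 *	error = lfs_bwrite_ext(bp, BW_CLEAN);
 *
 * so the inode is accounted IN_CLEANING rather than IN_MODIFIED.
 */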
/*
 * Called and return with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}
/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and return with lfs_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

 errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))
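
/*
 * Example (hypothetical figures): with 100 dirty inodes and
 * INOPB(fs) == 64, INOCOUNT(fs) = howmany(100, 64) = 2 pro-forma
 * buffers, and INOBYTES(fs) = 100 * sizeof (struct ufs1_dinode) =
 * 12800 bytes of pending inode data (128-byte dinodes).
 */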
/*
 * make sure that we don't have too many locked buffers.
 * flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
			&lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}
/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;

	mutex_enter(&bufcache_lock);
	mutex_enter(&vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(&vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}
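
/*
 * Illustrative pairing (hypothetical address and size; not a call site
 * in this file): allocate a header plus lfs_malloc'ed data here and
 * dispose of both through lfs_freebuf() below:
 *
 *	bp = lfs_newbuf(fs, fs->lfs_ivnode, daddr, fs->lfs_bsize,
 *	    LFS_NB_CLEAN);
 *	...fill bp->b_data...
 *	lfs_freebuf(fs, bp);
 */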
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		brelvp(bp);
		mutex_exit(&vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}
/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}
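
/*
 * Typical invocation (roughly what LFS_DEBUG_COUNTLOCKED("flush")
 * expands to when DEBUG is enabled): re-derive the pro-forma counters
 * from the actual LOCKED queue and log any drift:
 *
 *	lfs_countlocked(&locked_queue_count, &locked_queue_bytes, "flush");
 */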
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}