/*	$NetBSD: lfs_segment.c,v 1.260 2015/10/03 08:28:16 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.260 2015/10/03 08:28:16 dholland Exp $");

#ifdef DEBUG
# define vndebug(vp, str) do {						\
	if (VTOI(vp)->i_flag & IN_CLEANING)				\
		DLOG((DLOG_WVNODE, "not writing ino %d because %s (op %d)\n", \
		     VTOI(vp)->i_number, (str), op));			\
} while(0)
#else
# define vndebug(vp, str)
#endif
#define ivndebug(vp, str) \
	DLOG((DLOG_WVNODE, "ino %d: %s\n", VTOI(vp)->i_number, (str)))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>
#include <sys/syslog.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

MALLOC_JUSTDEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");

static void lfs_generic_callback(struct buf *, void (*)(struct buf *));
static void lfs_free_aiodone(struct buf *);
static void lfs_super_aiodone(struct buf *);
static void lfs_cluster_aiodone(struct buf *);
static void lfs_cluster_callback(struct buf *);

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	(lfs_sb_getfsbpseg(fs) - \
	    (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)) > \
	lfs_sb_getfrag(fs))
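/*
 * Reading the test above: (offset - curseg) is how far we have already
 * written into the current segment, so the macro asks whether more than
 * one fragment's worth of space remains.  With illustrative numbers only
 * (not taken from any real fs): fsbpseg = 1024 fsb, offset 900 fsb past
 * curseg, frag = 8 fsb gives 1024 - 900 = 124 > 8, so a partial fits.
 */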
/*
 * Figure out whether we should do a checkpoint write or go ahead with
 * an ordinary write.
 */
#define LFS_SHOULD_CHECKPOINT(fs, flags) \
	((flags & SEGM_CLEAN) == 0 &&					\
	  ((fs->lfs_nactive > LFS_MAX_ACTIVE ||				\
	    (flags & SEGM_CKP) ||					\
	    lfs_sb_getnclean(fs) < LFS_MAX_ACTIVE)))
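/*
 * That is: cleaner-initiated writes never checkpoint; otherwise we
 * checkpoint when too many segments are active since the last one, when
 * the caller explicitly asked for one (SEGM_CKP), or when the supply of
 * clean segments is running low.
 */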
int	 lfs_match_fake(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
void	 lfs_supercallback(struct buf *);
void	 lfs_updatemeta(struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops);

static void lfs_shellsort(struct lfs *, struct buf **, union lfs_blocks *,
			  int, int);

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	ASSERT_MAYBE_SEGLOCK(fs);
	vfs_timestamp(&ts);
	ip = VTOI(fs->lfs_ivnode);
	lfs_dino_setmtime(fs, ip->i_din, ts.tv_sec);
	lfs_dino_setmtimensec(fs, ip->i_din, ts.tv_nsec);
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */
#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))

int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error;
	int flushed;
	int relock;

	ip = VTOI(vp);
	fs = VFSTOULFS(vp->v_mount)->um_lfs;
	relock = 0;

    top:
	KASSERT(mutex_owned(vp->v_interlock) == false);
	KASSERT(mutex_owned(&lfs_lock) == false);
	KASSERT(mutex_owned(&bufcache_lock) == false);
	ASSERT_NO_SEGLOCK(fs);
	if (ip->i_flag & IN_CLEANING) {
		ivndebug(vp,"vflush/in_cleaning");
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);
		mutex_exit(&lfs_lock);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data.
		 */
		mutex_enter(vp->v_interlock);
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (!LFS_IS_MALLOC_BUF(bp))
				continue;
			/*
			 * Look for pages matching the range covered
			 * by cleaning blocks.  It's okay if more dirty
			 * pages appear, so long as none disappear out
			 * from under us.
			 */
			if (bp->b_lblkno > 0 && vp->v_type == VREG &&
			    vp != fs->lfs_ivnode) {
				struct vm_page *pg;
				voff_t off;

				for (off = lfs_lblktosize(fs, bp->b_lblkno);
				     off < lfs_lblktosize(fs, bp->b_lblkno + 1);
				     off += PAGE_SIZE) {
					pg = uvm_pagelookup(&vp->v_uobj, off);
					if (pg == NULL)
						continue;
					if ((pg->flags & PG_CLEAN) == 0 ||
					    pmap_is_modified(pg)) {
						lfs_sb_addavail(fs,
							lfs_btofsb(fs,
								bp->b_bcount));
						wakeup(&fs->lfs_availsleep);
						mutex_exit(vp->v_interlock);
						lfs_freebuf(fs, bp);
						mutex_enter(vp->v_interlock);
						bp = NULL;
						break;
					}
				}
			}
			for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
			    tbp = tnbp)
			{
				tnbp = LIST_NEXT(tbp, b_vnbufs);
				if (tbp->b_vp == bp->b_vp
				   && tbp->b_lblkno == bp->b_lblkno
				   && tbp != bp)
				{
					lfs_sb_addavail(fs, lfs_btofsb(fs,
						bp->b_bcount));
					wakeup(&fs->lfs_availsleep);
					mutex_exit(vp->v_interlock);
					lfs_freebuf(fs, bp);
					mutex_enter(vp->v_interlock);
					bp = NULL;
					break;
				}
			}
		}
	} else {
		mutex_enter(vp->v_interlock);
	}

	/* If the node is being written, wait until that is done */
	while (WRITEINPROG(vp)) {
		ivndebug(vp,"vflush/writeinprog");
		cv_wait(&vp->v_cv, vp->v_interlock);
	}
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);

	/* Protect against deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC | ((error != 0) ? SEGM_RECLAIM : 0));
	if (error != 0) {
		fs->lfs_reclino = ip->i_number;
	}

	/* If we're supposed to flush a freed inode, just toss it */
	if (ip->i_lfs_iflags & LFSI_DELETED) {
		DLOG((DLOG_VNODE, "lfs_vflush: ino %d freed, not flushing\n",
		      ip->i_number));
		/* Drain v_numoutput */
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0) {
			cv_wait(&vp->v_cv, vp->v_interlock);
		}
		KASSERT(vp->v_numoutput == 0);
		mutex_exit(vp->v_interlock);

		mutex_enter(&bufcache_lock);
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);

			KASSERT((bp->b_flags & B_GATHERED) == 0);
			if (bp->b_oflags & BO_DELWRI) { /* XXX always true? */
				lfs_sb_addavail(fs, lfs_btofsb(fs, bp->b_bcount));
				wakeup(&fs->lfs_availsleep);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_iodone != NULL) {
				mutex_exit(&bufcache_lock);
				biodone(bp);
				mutex_enter(&bufcache_lock);
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				mutex_enter(vp->v_interlock);
				bp->b_flags &= ~(B_READ | B_GATHERED);
				bp->b_oflags = (bp->b_oflags & ~BO_DELWRI) | BO_DONE;
				bp->b_error = 0;
				reassignbuf(bp, vp);
				mutex_exit(vp->v_interlock);
				brelse(bp, 0);
			}
		}
		mutex_exit(&bufcache_lock);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_flag &= ~IN_ALLMOD;
		DLOG((DLOG_VNODE, "lfs_vflush: done not flushing ino %d\n",
		      ip->i_number));
		lfs_segunlock(fs);

		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);

		return 0;
	}

	fs->lfs_flushvp = vp;
	if (LFS_SHOULD_CHECKPOINT(fs, fs->lfs_sp->seg_flags)) {
		error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
		fs->lfs_flushvp = NULL;
		KASSERT(fs->lfs_flushvp_fakevref == 0);
		lfs_segunlock(fs);

		/* Make sure that any pending buffers get written */
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0) {
			cv_wait(&vp->v_cv, vp->v_interlock);
		}
		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
		KASSERT(vp->v_numoutput == 0);
		mutex_exit(vp->v_interlock);

		return error;
	}

	sp = fs->lfs_sp;

	flushed = 0;
	if (VPISEMPTY(vp)) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
		++flushed;
	} else if ((ip->i_flag & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
		ivndebug(vp,"vflush/clean");
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
		++flushed;
	} else if (lfs_dostats) {
		if (!VPISEMPTY(vp) || (VTOI(vp)->i_flag & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
		ivndebug(vp,"vflush");
	}

#ifdef DIAGNOSTIC
	if (vp->v_uflag & VU_DIROP) {
		DLOG((DLOG_VNODE, "lfs_vflush: flushing VU_DIROP\n"));
		/* panic("lfs_vflush: VU_DIROP being flushed...this can\'t happen"); */
	}
#endif

	do {
#ifdef DEBUG
		int loopcount = 0;
#endif
		do {
			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				relock = lfs_writefile(fs, sp, vp);
				if (relock && vp != fs->lfs_ivnode) {
					/*
					 * Might have to wait for the
					 * cleaner to run; but we're
					 * still not done with this vnode.
					 * XXX we can do better than this.
					 */
					KDASSERT(ip->i_number != LFS_IFILE_INUM);
					lfs_writeinode(fs, sp, ip);
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_MODIFIED);
					mutex_exit(&lfs_lock);
					lfs_writeseg(fs, sp);
					lfs_segunlock(fs);
					lfs_segunlock_relock(fs);
					goto top;
				}
			}
			/*
			 * If we begin a new segment in the middle of writing
			 * the Ifile, it creates an inconsistent checkpoint,
			 * since the Ifile information for the new segment
			 * is not up-to-date.  Take care of this here by
			 * sending the Ifile through again in case there
			 * are newly dirtied blocks.  But wait, there's more!
			 * This second Ifile write could *also* cross a segment
			 * boundary, if the first one was large.  The second
			 * one is guaranteed to be no more than 8 blocks,
			 * though (two segment blocks and supporting indirects)
			 * so the third write *will not* cross the boundary.
			 */
			if (vp == fs->lfs_ivnode) {
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_vflush: looping count=%d\n", loopcount);
#endif
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from somewhere that has already held the seglock
	 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
	 * the write to complete because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * we must explicitly wait, if that is the case.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	mutex_enter(&lfs_lock);
	if (fs->lfs_seglock > 1) {
		while (fs->lfs_iocount > 1)
			(void)mtsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0, &lfs_lock);
	}
	mutex_exit(&lfs_lock);

	lfs_segunlock(fs);

	/* Wait for these buffers to be recovered by aiodoned */
	mutex_enter(vp->v_interlock);
	while (vp->v_numoutput > 0) {
		cv_wait(&vp->v_cv, vp->v_interlock);
	}
	KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
	KASSERT(vp->v_numoutput == 0);
	mutex_exit(vp->v_interlock);

	fs->lfs_flushvp = NULL;
	KASSERT(fs->lfs_flushvp_fakevref == 0);

	return (0);
}
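/*
 * Context handed to lfs_writevnodes_selector() through the vnode
 * iterator: the op (one of the VN_* values above) and the filesystem
 * being written.
 */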
struct lfs_writevnodes_ctx {
	int op;
	struct lfs *fs;
};
static bool
lfs_writevnodes_selector(void *cl, struct vnode *vp)
{
	struct lfs_writevnodes_ctx *c = cl;
	struct inode *ip = VTOI(vp);
	int op = c->op;

	if (ip == NULL || vp->v_type == VNON)
		return false;
	if ((op == VN_DIROP && !(vp->v_uflag & VU_DIROP)) ||
	    (op != VN_DIROP && op != VN_CLEAN && (vp->v_uflag & VU_DIROP))) {
		vndebug(vp, "dirop");
		return false;
	}
	if (op == VN_EMPTY && !VPISEMPTY(vp)) {
		vndebug(vp,"empty");
		return false;
	}
	if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM &&
	    vp != c->fs->lfs_flushvp && !(ip->i_flag & IN_CLEANING)) {
		vndebug(vp,"cleaning");
		return false;
	}
	mutex_enter(&lfs_lock);
	if (vp == c->fs->lfs_unlockvp) {
		mutex_exit(&lfs_lock);
		return false;
	}
	mutex_exit(&lfs_lock);

	return true;
}

int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct lfs_writevnodes_ctx ctx;
	int inodes_written = 0;
	int error = 0;

	/*
	 * XXX This was TAILQ_FOREACH_REVERSE on &mp->mnt_vnodelist.
	 * XXX The rationale is unclear, the initial commit had no information.
	 * XXX If the order really matters we have to sort the vnodes first.
	 */

	ASSERT_SEGLOCK(fs);
	vfs_vnode_iterator_init(mp, &marker);
	ctx.op = op;
	ctx.fs = fs;
	while ((vp = vfs_vnode_iterator_next(marker,
	    lfs_writevnodes_selector, &ctx)) != NULL) {
		ip = VTOI(vp);

		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if (((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp)) &&
		    ip->i_number != LFS_IFILE_INUM) {
			error = lfs_writefile(fs, sp, vp);
			if (error) {
				vrele(vp);
				if (error == EAGAIN) {
					/*
					 * This error from lfs_putpages
					 * indicates we need to drop
					 * the segment lock and start
					 * over after the cleaner has
					 * had a chance to run.
					 */
					lfs_writeinode(fs, sp, ip);
					lfs_writeseg(fs, sp);
					if (!VPISEMPTY(vp) &&
					    !WRITEINPROG(vp) &&
					    !(ip->i_flag & IN_ALLMOD)) {
						mutex_enter(&lfs_lock);
						LFS_SET_UINO(ip, IN_MODIFIED);
						mutex_exit(&lfs_lock);
					}
					break;
				}
				error = 0; /* XXX not quite right */
				continue;
			}

			if (!VPISEMPTY(vp)) {
				if (WRITEINPROG(vp)) {
					ivndebug(vp,"writevnodes/write2");
				} else if (!(ip->i_flag & IN_ALLMOD)) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_MODIFIED);
					mutex_exit(&lfs_lock);
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}
		vrele(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return error;
}
/*
 * Do a checkpoint.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	int do_ckp, did_ckp, error;
	unsigned n, segleft, maxseg, sn, i, curseg;
	int writer_set = 0;
	int dirty;
	int redo;
	SEGSUM *ssp;
	int um_error;

	fs = VFSTOULFS(mp)->um_lfs;
	ASSERT_MAYBE_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = LFS_SHOULD_CHECKPOINT(fs, flags);

	/* We can't do a partial write and checkpoint at the same time. */
	if (do_ckp)
		flags &= ~SEGM_SINGLE;

	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;
	if (sp->seg_flags & (SEGM_CLEAN | SEGM_CKP))
		do_ckp = 1;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
		do {
			um_error = lfs_writevnodes(fs, mp, sp, VN_REG);
			if ((sp->seg_flags & SEGM_SINGLE) &&
			    lfs_sb_getcurseg(fs) != fs->lfs_startseg) {
				DLOG((DLOG_SEG, "lfs_segwrite: breaking out of segment write at daddr 0x%jx\n", (uintmax_t)lfs_sb_getoffset(fs)));
				break;
			}

			if (do_ckp || fs->lfs_dirops == 0) {
				if (!writer_set) {
					lfs_writer_enter(fs, "lfs writer");
					writer_set = 1;
				}
				error = lfs_writevnodes(fs, mp, sp, VN_DIROP);
				if (um_error == 0)
					um_error = error;
				/* In case writevnodes errored out */
				lfs_flush_dirops(fs);
				ssp = (SEGSUM *)(sp->segsum);
				lfs_ss_setflags(fs, ssp,
				    lfs_ss_getflags(fs, ssp) & ~(SS_CONT));
				lfs_finalize_fs_seguse(fs);
			}
			if (do_ckp && um_error) {
				lfs_segunlock_relock(fs);
				sp = fs->lfs_sp;
			}
		} while (do_ckp && um_error != 0);
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp || fs->lfs_doifile) {
		segleft = lfs_sb_getnseg(fs);
		curseg = 0;
		for (n = 0; n < lfs_sb_getsegtabsz(fs); n++) {
			dirty = 0;
			if (bread(fs->lfs_ivnode, lfs_sb_getcleansz(fs) + n,
			    lfs_sb_getbsize(fs), B_MODIFY, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			maxseg = min(segleft, lfs_sb_getsepb(fs));
			for (i = 0; i < maxseg; i++) {
				sn = curseg + i;
				if (sn != lfs_dtosn(fs, lfs_sb_getcurseg(fs)) &&
				    segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					--fs->lfs_nactive;
					++dirty;
				}
				fs->lfs_suflags[fs->lfs_activesb][sn] =
					segusep->su_flags;
				if (lfs_sb_getversion(fs) > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp, 0);
			segleft -= lfs_sb_getsepb(fs);
			curseg += lfs_sb_getsepb(fs);
		}
	}

	KASSERT(LFS_SEGLOCK_HELD(fs));

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		vp = fs->lfs_ivnode;
#ifdef DEBUG
		int loopcount = 0;
#endif
		do {
#ifdef DEBUG
			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
			mutex_enter(&lfs_lock);
			fs->lfs_flags &= ~LFS_IFDIRTY;
			mutex_exit(&lfs_lock);

			ip = VTOI(vp);

			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				/*
				 * Ifile has no pages, so we don't need
				 * to check error return here.
				 */
				lfs_writefile(fs, sp, vp);
				/*
				 * Ensure the Ifile takes the current segment
				 * into account.  See comment in lfs_vflush.
				 */
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}

			if (ip->i_flag & IN_ALLMOD)
				++did_ckp;
#if 0
			redo = (do_ckp ? lfs_writeinode(fs, sp, ip) : 0);
#else
			redo = lfs_writeinode(fs, sp, ip);
#endif
			redo += lfs_writeseg(fs, sp);
			mutex_enter(&lfs_lock);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
			mutex_exit(&lfs_lock);
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_segwrite: looping count=%d\n",
				    loopcount);
#endif
		} while (redo && do_ckp);

		/*
		 * Unless we are unmounting, the Ifile may continue to have
		 * dirty blocks even after a checkpoint, due to changes to
		 * inodes' atime.  If we're checkpointing, it's "impossible"
		 * for other parts of the Ifile to be dirty after the loop
		 * above, since we hold the segment lock.
		 */
		mutex_enter(vp->v_interlock);
		if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
			LFS_CLR_UINO(ip, IN_ALLMOD);
		}
#ifdef DIAGNOSTIC
		else if (do_ckp) {
			int do_panic = 0;
			LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_lblkno < lfs_sb_getcleansz(fs) +
				    lfs_sb_getsegtabsz(fs) &&
				    !(bp->b_flags & B_GATHERED)) {
					printf("ifile lbn %ld still dirty (flags %lx)\n",
						(long)bp->b_lblkno,
						(long)bp->b_flags);
					++do_panic;
				}
			}
			if (do_panic)
				panic("dirty blocks");
		}
#endif
		mutex_exit(vp->v_interlock);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/* Note Ifile no longer needs to be written */
	fs->lfs_doifile = 0;
	if (writer_set)
		lfs_writer_leave(fs);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (do_ckp && !did_ckp) {
		sp->seg_flags &= ~SEGM_CKP;
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}
/*
 * Write the dirty blocks associated with a vnode.
 */
int
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct inode *ip;
	int i, frag;
	SEGSUM *ssp;
	int error;

	ASSERT_SEGLOCK(fs);
	error = 0;
	ip = VTOI(vp);

	lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);

	if (vp->v_uflag & VU_DIROP) {
		ssp = (SEGSUM *)sp->segsum;
		lfs_ss_setflags(fs, ssp,
		    lfs_ss_getflags(fs, ssp) | (SS_DIROP|SS_CONT));
	}

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs, vp) || vp == fs->lfs_ivnode) {
			lfs_gather(fs, sp, vp, lfs_match_data);
			/*
			 * Don't call VOP_PUTPAGES: if we're flushing,
			 * we've already done it, and the Ifile doesn't
			 * use the page cache.
			 */
		}
	} else {
		lfs_gather(fs, sp, vp, lfs_match_data);
		/*
		 * If we're flushing, we've already called VOP_PUTPAGES
		 * so don't do it again.  Otherwise, we want to write
		 * everything we've got.
		 */
		if (!IS_FLUSHING(fs, vp)) {
			mutex_enter(vp->v_interlock);
			error = VOP_PUTPAGES(vp, 0, 0,
			    PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
		}
	}

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 *
	 * BUT if we are cleaning, we might have indirect blocks that refer to
	 * new blocks not being written yet, in addition to fragments being
	 * moved out of a cleaned segment.  If that is the case, don't
	 * write the indirect blocks, or the finfo will have a small block
	 * in the middle of it!
	 * XXX in this case isn't the inode size wrong too?
	 */
	frag = 0;
	if (sp->seg_flags & SEGM_CLEAN) {
		for (i = 0; i < ULFS_NDADDR; i++)
			if (ip->i_lfs_fragsize[i] > 0 &&
			    ip->i_lfs_fragsize[i] < lfs_sb_getbsize(fs))
				++frag;
	}
#ifdef DIAGNOSTIC
	if (frag > 1)
		panic("lfs_writefile: more than one fragment!");
#endif
	if (IS_FLUSHING(fs, vp) ||
	    (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	lfs_release_finfo(fs);

	return error;
}
/*
 * Update segment accounting to reflect this inode's change of address.
 */
static int
lfs_update_iaddr(struct lfs *fs, struct segment *sp, struct inode *ip, daddr_t ndaddr)
{
	struct buf *bp;
	daddr_t daddr;
	IFILE *ifp;
	SEGUSE *sup;
	ino_t ino;
	int redo_ifile;
	u_int32_t sn;

	redo_ifile = 0;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = lfs_sb_getidaddr(fs);
		lfs_sb_setidaddr(fs, LFS_DBTOFSB(fs, ndaddr));
	} else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = lfs_if_getdaddr(fs, ifp);
		lfs_if_setdaddr(fs, ifp, LFS_DBTOFSB(fs, ndaddr));
		(void)LFS_BWRITE_LOG(bp); /* Ifile */
	}

	/*
	 * If this is the Ifile and lfs_offset is set to the first block
	 * in the segment, dirty the new segment's accounting block
	 * (XXX should already be dirty?) and tell the caller to do it again.
	 */
	if (ip->i_number == LFS_IFILE_INUM) {
		sn = lfs_dtosn(fs, lfs_sb_getoffset(fs));
		if (lfs_sntod(fs, sn) + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) ==
		    lfs_sb_getoffset(fs)) {
			LFS_SEGENTRY(sup, fs, sn, bp);
			KASSERT(bp->b_oflags & BO_DELWRI);
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
			/* fs->lfs_flags |= LFS_IFDIRTY; */
			redo_ifile |= 1;
		}
	}

	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).  Both inodes will be accounted to this segment
	 * in lfs_writeseg so we need to subtract the earlier version
	 * here anyway.  The segment count can temporarily dip below
	 * zero here; keep track of how many duplicates we have in
	 * "dupino" so we don't panic below.
	 */
	if (daddr >= lfs_sb_getlastpseg(fs) && daddr <= lfs_sb_getoffset(fs)) {
		++sp->ndupino;
		DLOG((DLOG_SEG, "lfs_writeinode: last inode addr in current pseg "
		      "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
		      (long long)daddr, sp->ndupino));
	}
	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
	if (daddr != LFS_UNUSED_DADDR) {
		u_int32_t oldsn = lfs_dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino = (sp->seg_number == oldsn) ? sp->ndupino : 0;
#endif
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes + DINOSIZE(fs) * ndupino < DINOSIZE(fs)) {
			printf("lfs_writeinode: negative bytes "
			       "(segment %" PRIu32 " short by %d, "
			       "oldsn=%" PRIu32 ", cursn=%" PRIu32
			       ", daddr=%" PRId64 ", su_nbytes=%u, "
			       "ndupino=%d)\n",
			       lfs_dtosn(fs, daddr),
			       (int)DINOSIZE(fs) *
				    (1 - sp->ndupino) - sup->su_nbytes,
			       oldsn, sp->seg_number, daddr,
			       (unsigned int)sup->su_nbytes,
			       sp->ndupino);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = DINOSIZE(fs);
		}
#endif
		DLOG((DLOG_SU, "seg %d -= %d for ino %d inode\n",
		      lfs_dtosn(fs, daddr), DINOSIZE(fs), ino));
		sup->su_nbytes -= DINOSIZE(fs);
		redo_ifile |=
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile) {
			mutex_enter(&lfs_lock);
			fs->lfs_flags |= LFS_IFDIRTY;
			mutex_exit(&lfs_lock);
			/* Don't double-account */
			lfs_sb_setidaddr(fs, 0x0);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
	}

	return redo_ifile;
}
int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp;
	union lfs_dinode *cdp;
	struct vnode *vp = ITOV(ip);
	daddr_t daddr;
	IINFO *iip;
	int i;
	int redo_ifile = 0;
	int gotblk = 0;
	int count;
	SEGSUM *ssp;

	ASSERT_SEGLOCK(fs);
	if (!(ip->i_flag & IN_ALLMOD) && !(vp->v_uflag & VU_DIROP))
		return (0);

	/* Can't write ifile when writer is not set */
	KASSERT(ip->i_number != LFS_IFILE_INUM || fs->lfs_writer > 0 ||
		(sp->seg_flags & SEGM_CLEAN));

	/*
	 * If this is the Ifile, see if writing it here will generate a
	 * temporary misaccounting.  If it will, do the accounting and write
	 * the blocks, postponing the inode write until the accounting is
	 * solid.
	 */
	count = 0;
	while (vp == fs->lfs_ivnode) {
		int redo = 0;

		if (sp->idp == NULL && sp->ibp == NULL &&
		    (sp->seg_bytes_left < lfs_sb_getibsize(fs) ||
		     sp->sum_bytes_left < sizeof(int32_t))) {
			(void) lfs_writeseg(fs, sp);
			continue;
		}

		/* Look for dirty Ifile blocks */
		LIST_FOREACH(bp, &fs->lfs_ivnode->v_dirtyblkhd, b_vnbufs) {
			if (!(bp->b_flags & B_GATHERED)) {
				redo = 1;
				break;
			}
		}

		if (redo == 0)
			redo = lfs_update_iaddr(fs, sp, ip, 0x0);
		if (redo == 0)
			break;

		if (sp->idp) {
			lfs_dino_setinumber(fs, sp->idp, 0);
			sp->idp = NULL;
		}
		++count;
		if (count > 2)
			log(LOG_NOTICE, "lfs_writeinode: looping count=%d\n", count);
		lfs_writefile(fs, sp, fs->lfs_ivnode);
	}

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
	    sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < lfs_sb_getibsize(fs) ||
		    sp->sum_bytes_left < sizeof(int32_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = lfs_sb_getoffset(fs);
		lfs_sb_addoffset(fs, lfs_btofsb(fs, lfs_sb_getibsize(fs)));
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp,
			    LFS_FSBTODB(fs, daddr), lfs_sb_getibsize(fs), 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < LFS_INOPB(fs); ++i) {
			union lfs_dinode *tmpdi;

			tmpdi = (union lfs_dinode *)((char *)sp->ibp->b_data +
						     DINOSIZE(fs) * i);
			lfs_dino_setinumber(fs, tmpdi, 0);
		}

		++sp->start_bpp;
		lfs_sb_subavail(fs, lfs_btofsb(fs, lfs_sb_getibsize(fs)));
		/* Set remaining space counters. */
		sp->seg_bytes_left -= lfs_sb_getibsize(fs);
		sp->sum_bytes_left -= sizeof(int32_t);

		/* Store the address in the segment summary. */
		iip = NTH_IINFO(fs, sp->segsum, sp->ninodes / LFS_INOPB(fs));
		lfs_ii_setblock(fs, iip, daddr);
	}

	/* Check VU_DIROP in case there is a new file with no data blocks */
	if (vp->v_uflag & VU_DIROP) {
		ssp = (SEGSUM *)sp->segsum;
		lfs_ss_setflags(fs, ssp,
		    lfs_ss_getflags(fs, ssp) | (SS_DIROP|SS_CONT));
	}

	/* Update the inode times and copy the inode onto the inode page. */
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, NULL, NULL, NULL);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		lfs_copy_dinode(fs, sp->idp, ip->i_din);
		ip->i_lfs_osize = ip->i_size;
		return 0;
	}

	bp = sp->ibp;
	cdp = DINO_IN_BLOCK(fs, bp->b_data, sp->ninodes % LFS_INOPB(fs));
	lfs_copy_dinode(fs, cdp, ip->i_din);

	/*
	 * This inode is on its way to disk; clear its VU_DIROP status when
	 * the write is complete.
	 */
	if (vp->v_uflag & VU_DIROP) {
		if (!(sp->seg_flags & SEGM_CLEAN))
			ip->i_flag |= IN_CDIROP;
		else {
			DLOG((DLOG_DIROP, "lfs_writeinode: not clearing dirop for cleaned ino %d\n", (int)ip->i_number));
		}
	}

	/*
	 * If cleaning, link counts and directory file sizes cannot change,
	 * since those would be directory operations---even if the file
	 * we are writing is marked VU_DIROP we should write the old values.
	 * If we're not cleaning, of course, update the values so we get
	 * current values the next time we clean.
	 */
	if (sp->seg_flags & SEGM_CLEAN) {
		if (vp->v_uflag & VU_DIROP) {
			lfs_dino_setnlink(fs, cdp, ip->i_lfs_odnlink);
			/* if (vp->v_type == VDIR) */
			lfs_dino_setsize(fs, cdp, ip->i_lfs_osize);
		}
	} else {
		ip->i_lfs_odnlink = lfs_dino_getnlink(fs, cdp);
		ip->i_lfs_osize = ip->i_size;
	}

	/* We can finish the segment accounting for truncations now */
	lfs_finalize_ino_seguse(fs, ip);

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk; possibly change the on-disk record of
	 * the inode size, either by reverting to the previous size
	 * (in the case of cleaning) or by verifying the inode's block
	 * holdings (in the case of files being allocated as they are being
	 * written).
	 * XXX By not writing UNWRITTEN blocks, we are making the lfs_avail
	 * XXX count on disk wrong by the same amount.  We should be
	 * XXX able to "borrow" from lfs_avail and return it after the
	 * XXX Ifile is written.  See also in lfs_writeseg.
	 */

	/* Check file size based on highest allocated block */
	if (((lfs_dino_getmode(fs, ip->i_din) & LFS_IFMT) == LFS_IFREG ||
	     (lfs_dino_getmode(fs, ip->i_din) & LFS_IFMT) == LFS_IFDIR) &&
	    ip->i_size > ((ip->i_lfs_hiblk + 1) << lfs_sb_getbshift(fs))) {
		lfs_dino_setsize(fs, cdp, (ip->i_lfs_hiblk + 1) << lfs_sb_getbshift(fs));
		DLOG((DLOG_SEG, "lfs_writeinode: ino %d size %" PRId64 " -> %"
		      PRId64 "\n", (int)ip->i_number, ip->i_size, lfs_dino_getsize(fs, cdp)));
	}
	if (ip->i_lfs_effnblks != lfs_dino_getblocks(fs, ip->i_din)) {
		DLOG((DLOG_SEG, "lfs_writeinode: cleansing ino %d eff %jd != nblk %d)"
		      " at %jx\n", ip->i_number, (intmax_t)ip->i_lfs_effnblks,
		      lfs_dino_getblocks(fs, ip->i_din), (uintmax_t)lfs_sb_getoffset(fs)));
		for (i=0; i<ULFS_NDADDR; i++) {
			if (lfs_dino_getdb(fs, cdp, i) == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				lfs_dino_setdb(fs, cdp, i, 0);
			}
		}
		for (i=0; i<ULFS_NIADDR; i++) {
			if (lfs_dino_getib(fs, cdp, i) == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				lfs_dino_setib(fs, cdp, i, 0);
			}
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * Check dinode held blocks against dinode size.
	 * This should be identical to the check in lfs_vget().
	 */
	for (i = (lfs_dino_getsize(fs, cdp) + lfs_sb_getbsize(fs) - 1) >> lfs_sb_getbshift(fs);
	     i < ULFS_NDADDR; i++) {
		KASSERT(i >= 0);
		if ((lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFLNK)
			continue;
		if (((lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFBLK ||
		     (lfs_dino_getmode(fs, cdp) & LFS_IFMT) == LFS_IFCHR) && i == 0)
			continue;
		if (lfs_dino_getdb(fs, cdp, i) != 0) {
# ifdef DEBUG
			lfs_dump_dinode(fs, cdp);
# endif
			panic("writing inconsistent inode");
		}
	}
#endif /* DIAGNOSTIC */

	if (ip->i_flag & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE | IN_MODIFY);
		if (ip->i_lfs_effnblks == lfs_dino_getblocks(fs, ip->i_din))
			LFS_CLR_UINO(ip, IN_MODIFIED);
		else {
			DLOG((DLOG_VNODE, "lfs_writeinode: ino %d: real "
			      "blks=%d, eff=%jd\n", ip->i_number,
			      lfs_dino_getblocks(fs, ip->i_din), (intmax_t)ip->i_lfs_effnblks));
		}
	}

	if (ip->i_number == LFS_IFILE_INUM) {
		/* We know sp->idp == NULL */
		sp->idp = DINO_IN_BLOCK(fs, bp, sp->ninodes % LFS_INOPB(fs));

		/* Not dirty any more */
		mutex_enter(&lfs_lock);
		fs->lfs_flags &= ~LFS_IFDIRTY;
		mutex_exit(&lfs_lock);
	}

	if (gotblk) {
		mutex_enter(&bufcache_lock);
		LFS_LOCK_BUF(bp);
		brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
	}

	/* Increment inode count in segment summary block. */

	ssp = (SEGSUM *)sp->segsum;
	lfs_ss_setninos(fs, ssp, lfs_ss_getninos(fs, ssp) + 1);

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % LFS_INOPB(fs) == 0)
		sp->ibp = NULL;

	redo_ifile = lfs_update_iaddr(fs, sp, ip, bp->b_blkno);

	KASSERT(redo_ifile == 0);
	return (redo_ifile);
}
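/*
 * Gather one buffer into the current partial segment: record it in the
 * FINFO block and charge it against the remaining summary and segment
 * space.  Returns 1 if the partial segment was full and had to be
 * written out first, in which case the caller must restart its scan.
 */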
int
lfs_gatherblock(struct segment *sp, struct buf *bp, kmutex_t *mptr)
{
	struct lfs *fs;
	int vers;
	int j, blksinblk;

	ASSERT_SEGLOCK(sp->fs);
	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic ("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	blksinblk = howmany(bp->b_bcount, lfs_sb_getbsize(fs));
	if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (mptr)
			mutex_exit(mptr);
		lfs_updatemeta(sp);

		vers = lfs_fi_getversion(fs, sp->fip);
		(void) lfs_writeseg(fs, sp);

		/* Add the current file to the segment summary. */
		lfs_acquire_finfo(fs, VTOI(sp->vp)->i_number, vers);

		if (mptr)
			mutex_enter(mptr);
		return (1);
	}

	if (bp->b_flags & B_GATHERED) {
		DLOG((DLOG_SEG, "lfs_gatherblock: already gathered! Ino %ju,"
		      " lbn %" PRId64 "\n",
		      (uintmax_t)lfs_fi_getino(fs, sp->fip), bp->b_lblkno));
		return (0);
	}

	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;

	*sp->cbpp++ = bp;
	for (j = 0; j < blksinblk; j++) {
		unsigned bn;

		bn = lfs_fi_getnblocks(fs, sp->fip);
		lfs_fi_setnblocks(fs, sp->fip, bn+1);
		lfs_fi_setblock(fs, sp->fip, bn, bp->b_lblkno + j);
		/* This block's accounting moves from lfs_favail to lfs_avail */
		lfs_deregister_block(sp->vp, bp->b_lblkno + j);
	}

	sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
    int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp, *nbp;
	int count = 0;

	ASSERT_SEGLOCK(fs);
	if (vp->v_type == VBLK)
		return 0;
	KASSERT(sp->vp == NULL);
	sp->vp = vp;
	mutex_enter(&bufcache_lock);

#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	\
	(((char *)&LIST_NEXT(bp, b_vnbufs)) - (char *)bp)
# define	BACK_BUF(BP)	\
	((struct buf *)(((char *)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	\
	((struct buf *)(((char *)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))

loop:
	/* Find last buffer. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	     bp && LIST_NEXT(bp, b_vnbufs) != NULL;
	     bp = LIST_NEXT(bp, b_vnbufs))
		/* nothing */;
	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
		nbp = BACK_BUF(bp);
#else /* LFS_NO_BACKBUF_HACK */
loop:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_cflags & BC_BUSY) != 0 ||
		    (bp->b_flags & B_GATHERED) != 0 || !match(fs, bp)) {
#ifdef DEBUG
			if (vp == fs->lfs_ivnode &&
			    (bp->b_cflags & BC_BUSY) != 0 &&
			    (bp->b_flags & B_GATHERED) == 0)
				log(LOG_NOTICE, "lfs_gather: ifile lbn %"
				    PRId64 " busy (%x) at 0x%jx",
				    bp->b_lblkno, bp->b_flags,
				    (uintmax_t)lfs_sb_getoffset(fs));
#endif
			continue;
		}
#ifdef DIAGNOSTIC
# ifdef LFS_USE_B_INVAL
		if ((bp->b_flags & BC_INVAL) != 0 && bp->b_iodone == NULL) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " is BC_INVAL\n", bp->b_lblkno));
			VOP_PRINT(bp->b_vp);
		}
# endif /* LFS_USE_B_INVAL */
		if (!(bp->b_oflags & BO_DELWRI))
			panic("lfs_gather: bp not BO_DELWRI");
		if (!(bp->b_flags & B_LOCKED)) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " blk %" PRId64 " not B_LOCKED\n",
			      bp->b_lblkno,
			      LFS_DBTOFSB(fs, bp->b_blkno)));
			VOP_PRINT(bp->b_vp);
			panic("lfs_gather: bp not B_LOCKED");
		}
#endif
		if (lfs_gatherblock(sp, bp, &bufcache_lock)) {
			goto loop;
		}
		count++;
	}
	mutex_exit(&bufcache_lock);
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;
	return count;
}

#if DEBUG
# define DEBUG_OOFF(n) do {						\
	if (ooff == 0) {						\
		DLOG((DLOG_SEG, "lfs_updatemeta[%d]: warning: writing " \
			"ino %d lbn %" PRId64 " at 0x%" PRIx32		\
			", was 0x0 (or %" PRId64 ")\n",			\
			(n), ip->i_number, lbn, ndaddr, daddr));	\
	}								\
} while (0)
#else
# define DEBUG_OOFF(n)
#endif

/*
 * Change the given block's address to ndaddr, finding its previous
 * location using ulfs_bmaparray().
 *
 * Account for this change in the segment table.
 *
 * called with sp == NULL by roll-forwarding code.
 */
void
lfs_update_single(struct lfs *fs, struct segment *sp,
    struct vnode *vp, daddr_t lbn, daddr_t ndaddr, int size)
{
	SEGUSE *sup;
	struct buf *bp;
	struct indir a[ULFS_NIADDR + 2], *ap;
	struct inode *ip;
	daddr_t daddr, ooff;
	int num, error;
	int bb, osize, obb;

	ASSERT_SEGLOCK(fs);
	KASSERT(sp == NULL || sp->vp == vp);
	ip = VTOI(vp);

	error = ulfs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
	if (error)
		panic("lfs_updatemeta: ulfs_bmaparray returned %d", error);

	KASSERT(daddr <= LFS_MAX_DADDR(fs));
	if (daddr > 0)
		daddr = LFS_DBTOFSB(fs, daddr);

	bb = lfs_numfrags(fs, size);
	switch (num) {
	    case 0:
		ooff = lfs_dino_getdb(fs, ip->i_din, lbn);
		DEBUG_OOFF(0);
		if (ooff == UNWRITTEN)
			lfs_dino_setblocks(fs, ip->i_din,
			    lfs_dino_getblocks(fs, ip->i_din) + bb);
		else {
			/* possible fragment truncation or extension */
			obb = lfs_btofsb(fs, ip->i_lfs_fragsize[lbn]);
			lfs_dino_setblocks(fs, ip->i_din,
			    lfs_dino_getblocks(fs, ip->i_din) + (bb-obb));
		}
		lfs_dino_setdb(fs, ip->i_din, lbn, ndaddr);
		break;
	    case 1:
		ooff = lfs_dino_getib(fs, ip->i_din, a[0].in_off);
		DEBUG_OOFF(1);
		if (ooff == UNWRITTEN)
			lfs_dino_setblocks(fs, ip->i_din,
			    lfs_dino_getblocks(fs, ip->i_din) + bb);
		lfs_dino_setib(fs, ip->i_din, a[0].in_off, ndaddr);
		break;
	    default:
		ap = &a[num - 1];
		if (bread(vp, ap->in_lbn, lfs_sb_getbsize(fs),
		    B_MODIFY, &bp))
			panic("lfs_updatemeta: bread bno %" PRId64,
			      ap->in_lbn);

		ooff = lfs_iblock_get(fs, bp->b_data, ap->in_off);
		DEBUG_OOFF(num);
		if (ooff == UNWRITTEN)
			lfs_dino_setblocks(fs, ip->i_din,
			    lfs_dino_getblocks(fs, ip->i_din) + bb);
		lfs_iblock_set(fs, bp->b_data, ap->in_off, ndaddr);
		(void) VOP_BWRITE(bp->b_vp, bp);
	}

	KASSERT(ooff == 0 || ooff == UNWRITTEN || ooff == daddr);

	/* Update hiblk when extending the file */
	if (lbn > ip->i_lfs_hiblk)
		ip->i_lfs_hiblk = lbn;

	/*
	 * Though we'd rather it couldn't, this *can* happen right now
	 * if cleaning blocks and regular blocks coexist.
	 */
	/* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */

	/*
	 * Update segment usage information, based on old size
	 * and location.
	 */
	if (daddr > 0) {
		u_int32_t oldsn = lfs_dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino;

		if (sp && sp->seg_number == oldsn) {
			ndupino = sp->ndupino;
		} else {
			ndupino = 0;
		}
#endif
		KASSERT(oldsn < lfs_sb_getnseg(fs));
		if (lbn >= 0 && lbn < ULFS_NDADDR)
			osize = ip->i_lfs_fragsize[lbn];
		else
			osize = lfs_sb_getbsize(fs);
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes + DINOSIZE(fs) * ndupino < osize) {
			printf("lfs_updatemeta: negative bytes "
			       "(segment %" PRIu32 " short by %" PRId64
			       ")\n", lfs_dtosn(fs, daddr),
			       (int64_t)osize -
			       (DINOSIZE(fs) * ndupino + sup->su_nbytes));
			printf("lfs_updatemeta: ino %llu, lbn %" PRId64
			       ", addr = 0x%" PRIx64 "\n",
			       (unsigned long long)ip->i_number, lbn, daddr);
			printf("lfs_updatemeta: ndupino=%d\n", ndupino);
			panic("lfs_updatemeta: negative bytes");
			sup->su_nbytes = osize -
			    DINOSIZE(fs) * ndupino;
		}
#endif
		DLOG((DLOG_SU, "seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
		      " db 0x%" PRIx64 "\n",
		      lfs_dtosn(fs, daddr), osize,
		      ip->i_number, lbn, daddr));
		sup->su_nbytes -= osize;
		if (!(bp->b_flags & B_GATHERED)) {
			mutex_enter(&lfs_lock);
			fs->lfs_flags |= LFS_IFDIRTY;
			mutex_exit(&lfs_lock);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
	}
	/*
	 * Now that this block has a new address, and its old
	 * segment no longer owns it, we can forget about its
	 * old size.
	 */
	if (lbn >= 0 && lbn < ULFS_NDADDR)
		ip->i_lfs_fragsize[lbn] = size;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(struct segment *sp)
{
	struct buf *sbp;
	struct lfs *fs;
	struct vnode *vp;
	daddr_t lbn;
	int i, nblocks, num;
	int __diagused nblocks_orig;
	int bb;
	int bytesleft, size;
	unsigned lastlength;
	union lfs_blocks tmpptr;

	fs = sp->fs;
	vp = sp->vp;
	ASSERT_SEGLOCK(fs);

	/*
	 * This used to be:
	 *
	 *  nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	 *
	 * that is, it allowed for the possibility that start_lbp did
	 * not point to the beginning of the finfo block pointer area.
	 * This particular formulation is six kinds of painful in the
	 * lfs64 world where we have two sizes of block pointer, so
	 * unless/until everything can be cleaned up to not move
	 * start_lbp around but instead use an offset, we do the
	 * following:
	 *    1. Get NEXT_FINFO(sp->fip). This is the same pointer as
	 * &sp->fip->fi_blocks[sp->fip->fi_nblocks], just the wrong
	 * type. (Ugh.)
	 *    2. Cast it to void *, then assign it to a temporary
	 * union lfs_blocks.
	 *    3. Subtract start_lbp from that.
	 *    4. Save the value of nblocks in blocks_orig so we can
	 * assert below that it hasn't changed without repeating this
	 * rubbish.
	 *
	 * XXX.
	 */
	lfs_blocks_fromvoid(fs, &tmpptr, (void *)NEXT_FINFO(fs, sp->fip));
	nblocks = lfs_blocks_sub(fs, &tmpptr, &sp->start_lbp);
	nblocks_orig = nblocks;

	KASSERT(nblocks >= 0);
	KASSERT(vp != NULL);
	if (nblocks == 0)
		return;

	/*
	 * This count may be high due to oversize blocks from lfs_gop_write.
	 * Correct for this. (XXX we should be able to keep track of these.)
	 */
	for (i = 0; i < nblocks; i++) {
		if (sp->start_bpp[i] == NULL) {
			DLOG((DLOG_SEG, "lfs_updatemeta: nblocks = %d, not %d\n", i, nblocks));
			nblocks = i;
			break;
		}
		num = howmany(sp->start_bpp[i]->b_bcount, lfs_sb_getbsize(fs));
		KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
		nblocks -= num - 1;
	}

#if 0
	/* pre-lfs64 assertion */
	KASSERT(vp->v_type == VREG ||
	   nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
#else
	KASSERT(vp->v_type == VREG || nblocks == nblocks_orig);
#endif
	KASSERT(nblocks == sp->cbpp - sp->start_bpp);

	/*
	 * Sort the blocks.
	 *
	 * We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	lfs_shellsort(fs, sp->start_bpp, &sp->start_lbp, nblocks, lfs_sb_getbsize(fs));

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 *
	 * XXX This last is a lie.  A cleaned fragment can coexist with
	 * XXX a later indirect block.  This will continue to be
	 * XXX true until lfs_markv is fixed to do everything with
	 * XXX fake blocks (including fake inodes and fake indirect blocks).
	 */
	lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
		lfs_sb_getbmask(fs)) + 1;
	lfs_fi_setlastlength(fs, sp->fip, lastlength);
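	/*
	 * (lastlength is b_bcount reduced modulo the block size, rounded
	 * up to at least one byte: assuming bmask is bsize - 1 and an
	 * illustrative 8192-byte bsize, a trailing 2048-byte fragment
	 * gives ((2048 - 1) & 8191) + 1 == 2048, while a full final
	 * block gives 8192.)
	 */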
1699 * Assign disk addresses, and update references to the logical
1700 * block and the segment usage information.
1702 for (i = nblocks; i--; ++sp->start_bpp) {
1703 sbp = *sp->start_bpp;
1704 lbn = lfs_blocks_get(fs, &sp->start_lbp, 0);
1705 KASSERT(sbp->b_lblkno == lbn);
1707 sbp->b_blkno = LFS_FSBTODB(fs, lfs_sb_getoffset(fs));
1710 * If we write a frag in the wrong place, the cleaner won't
1711 * be able to correctly identify its size later, and the
1712 * segment will be uncleanable. (Even worse, it will assume
1713 * that the indirect block that actually ends the list
1714 * is of a smaller size!)
1716 if ((sbp->b_bcount & lfs_sb_getbmask(fs)) && i != 0)
1717 panic("lfs_updatemeta: fragment is not last block");
1720 * For each subblock in this possibly oversized block,
1721 * update its address on disk.
1723 KASSERT(lbn >= 0 || sbp->b_bcount == lfs_sb_getbsize(fs));
1724 KASSERT(vp == sbp->b_vp);
1725 for (bytesleft = sbp->b_bcount; bytesleft > 0;
1726 bytesleft -= lfs_sb_getbsize(fs)) {
1727 size = MIN(bytesleft, lfs_sb_getbsize(fs));
1728 bb = lfs_numfrags(fs, size);
1729 lbn = lfs_blocks_get(fs, &sp->start_lbp, 0);
1730 lfs_blocks_inc(fs, &sp->start_lbp);
1731 lfs_update_single(fs, sp, sp->vp, lbn, lfs_sb_getoffset(fs),
1732 size);
1733 lfs_sb_addoffset(fs, bb);
1738 /* This inode has been modified */
1739 LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
1743 * Move lfs_offset to a segment earlier than newsn.
1746 lfs_rewind(struct lfs *fs, int newsn)
1748 int sn, osn, isdirty;
1749 struct buf *bp;
1750 SEGUSE *sup;
1752 ASSERT_SEGLOCK(fs);
1754 osn = lfs_dtosn(fs, lfs_sb_getoffset(fs));
1755 if (osn < newsn)
1756 return 0;
1758 /* lfs_avail eats the remaining space in this segment */
1759 lfs_sb_subavail(fs, lfs_sb_getfsbpseg(fs) - (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)));
1761 /* Find a low-numbered segment */
1762 for (sn = 0; sn < lfs_sb_getnseg(fs); ++sn) {
1763 LFS_SEGENTRY(sup, fs, sn, bp);
1764 isdirty = sup->su_flags & SEGUSE_DIRTY;
1765 brelse(bp, 0);
1767 if (!isdirty)
1768 break;
1770 if (sn == lfs_sb_getnseg(fs))
1771 panic("lfs_rewind: no clean segments");
1772 if (newsn >= 0 && sn >= newsn)
1773 return ENOENT;
1774 lfs_sb_setnextseg(fs, lfs_sntod(fs, sn));
1775 lfs_newseg(fs);
1776 lfs_sb_setoffset(fs, lfs_sb_getcurseg(fs));
1778 return 0;
1782 * Start a new partial segment.
1784 * Return 1 when we entered to a new segment.
1785 * Otherwise, return 0.
1788 lfs_initseg(struct lfs *fs)
1790 struct segment *sp = fs->lfs_sp;
1791 SEGSUM *ssp;
1792 struct buf *sbp; /* buffer for SEGSUM */
1793 int repeat = 0; /* return value */
1795 ASSERT_SEGLOCK(fs);
1796 /* Advance to the next segment. */
1797 if (!LFS_PARTIAL_FITS(fs)) {
1798 SEGUSE *sup;
1799 struct buf *bp;
1801 /* lfs_avail eats the remaining space */
1802 lfs_sb_subavail(fs, lfs_sb_getfsbpseg(fs) - (lfs_sb_getoffset(fs) -
1803 lfs_sb_getcurseg(fs)));
1804 /* Wake up any cleaning procs waiting on this file system. */
1805 lfs_wakeup_cleaner(fs);
1806 lfs_newseg(fs);
1807 repeat = 1;
1808 lfs_sb_setoffset(fs, lfs_sb_getcurseg(fs));
1810 sp->seg_number = lfs_dtosn(fs, lfs_sb_getcurseg(fs));
1811 sp->seg_bytes_left = lfs_fsbtob(fs, lfs_sb_getfsbpseg(fs));
1814 * If the segment contains a superblock, update the offset
1815 * and summary address to skip over it.
1817 LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1818 if (sup->su_flags & SEGUSE_SUPERBLOCK) {
1819 lfs_sb_addoffset(fs, lfs_btofsb(fs, LFS_SBPAD));
1820 sp->seg_bytes_left -= LFS_SBPAD;
1822 brelse(bp, 0);
1823 /* Segment zero could also contain the labelpad */
1824 if (lfs_sb_getversion(fs) > 1 && sp->seg_number == 0 &&
1825 lfs_sb_gets0addr(fs) < lfs_btofsb(fs, LFS_LABELPAD)) {
1826 lfs_sb_addoffset(fs,
1827 lfs_btofsb(fs, LFS_LABELPAD) - lfs_sb_gets0addr(fs));
1828 sp->seg_bytes_left -=
1829 LFS_LABELPAD - lfs_fsbtob(fs, lfs_sb_gets0addr(fs));
1831 } else {
1832 sp->seg_number = lfs_dtosn(fs, lfs_sb_getcurseg(fs));
1833 sp->seg_bytes_left = lfs_fsbtob(fs, lfs_sb_getfsbpseg(fs) -
1834 (lfs_sb_getoffset(fs) - lfs_sb_getcurseg(fs)));
1836 lfs_sb_setlastpseg(fs, lfs_sb_getoffset(fs));
1838 /* Record first address of this partial segment */
1839 if (sp->seg_flags & SEGM_CLEAN) {
1840 fs->lfs_cleanint[fs->lfs_cleanind] = lfs_sb_getoffset(fs);
1841 if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
1842 /* "1" is the artificial inc in lfs_seglock */
1843 mutex_enter(&lfs_lock);
1844 while (fs->lfs_iocount > 1) {
1845 mtsleep(&fs->lfs_iocount, PRIBIO + 1,
1846 "lfs_initseg", 0, &lfs_lock);
1848 mutex_exit(&lfs_lock);
1849 fs->lfs_cleanind = 0;
1853 sp->fs = fs;
1854 sp->ibp = NULL;
1855 sp->idp = NULL;
1856 sp->ninodes = 0;
1857 sp->ndupino = 0;
1859 sp->cbpp = sp->bpp;
1861 /* Get a new buffer for SEGSUM */
1862 sbp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
1863 LFS_FSBTODB(fs, lfs_sb_getoffset(fs)), lfs_sb_getsumsize(fs), LFS_NB_SUMMARY);
1865 /* ... and enter it into the buffer list. */
1866 *sp->cbpp = sbp;
1867 sp->cbpp++;
1868 lfs_sb_addoffset(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
1870 sp->start_bpp = sp->cbpp;
1872 /* Set point to SEGSUM, initialize it. */
1873 ssp = sp->segsum = sbp->b_data;
1874 memset(ssp, 0, lfs_sb_getsumsize(fs));
1875 lfs_ss_setnext(fs, ssp, lfs_sb_getnextseg(fs));
1876 lfs_ss_setnfinfo(fs, ssp, 0);
1877 lfs_ss_setninos(fs, ssp, 0);
1878 lfs_ss_setmagic(fs, ssp, SS_MAGIC);
1880 /* Set pointer to first FINFO, initialize it. */
1881 sp->fip = SEGSUM_FINFOBASE(fs, sp->segsum);
1882 lfs_fi_setnblocks(fs, sp->fip, 0);
1883 lfs_fi_setlastlength(fs, sp->fip, 0);
1884 lfs_blocks_fromfinfo(fs, &sp->start_lbp, sp->fip);
1886 sp->seg_bytes_left -= lfs_sb_getsumsize(fs);
1887 sp->sum_bytes_left = lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs);
1889 return (repeat);
1893 * Remove SEGUSE_INVAL from all segments.
1895 void
1896 lfs_unset_inval_all(struct lfs *fs)
1898 SEGUSE *sup;
1899 struct buf *bp;
1900 int i;
1902 for (i = 0; i < lfs_sb_getnseg(fs); i++) {
1903 LFS_SEGENTRY(sup, fs, i, bp);
1904 if (sup->su_flags & SEGUSE_INVAL) {
1905 sup->su_flags &= ~SEGUSE_INVAL;
1906 LFS_WRITESEGENTRY(sup, fs, i, bp);
1907 } else
1908 brelse(bp, 0);
/*
 * Advance to the next segment to write.
 */
void
lfs_newseg(struct lfs *fs)
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn, skip_inval;

	ASSERT_SEGLOCK(fs);
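
	/*
	 * The log is about to wrap when the next segment precedes the
	 * current one on disk.  A process that set LFCNWRAPSTOP
	 * (fs->lfs_nowrap) can hold us here until it either goes away
	 * or grants a single pass.
	 */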
	/* Honor LFCNWRAPSTOP */
	mutex_enter(&lfs_lock);
	while (lfs_sb_getnextseg(fs) < lfs_sb_getcurseg(fs) && fs->lfs_nowrap) {
		if (fs->lfs_wrappass) {
			log(LOG_NOTICE, "%s: wrappass=%d\n",
			    lfs_sb_getfsmnt(fs), fs->lfs_wrappass);
			fs->lfs_wrappass = 0;
			break;
		}
		fs->lfs_wrapstatus = LFS_WRAP_WAITING;
		wakeup(&fs->lfs_nowrap);
		log(LOG_NOTICE, "%s: waiting at log wrap\n", lfs_sb_getfsmnt(fs));
		mtsleep(&fs->lfs_wrappass, PVFS, "newseg", 10 * hz,
		    &lfs_lock);
	}
	fs->lfs_wrapstatus = LFS_WRAP_GOING;
	mutex_exit(&lfs_lock);
	LFS_SEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getnextseg(fs)), bp);
	DLOG((DLOG_SU, "lfs_newseg: seg %d := 0 in newseg\n",
	    lfs_dtosn(fs, lfs_sb_getnextseg(fs))));
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	LFS_WRITESEGENTRY(sup, fs, lfs_dtosn(fs, lfs_sb_getnextseg(fs)), bp);

	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_shiftcleantodirty(fs, cip, 1);
	lfs_sb_setnclean(fs, lfs_ci_getclean(fs, cip));
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
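
	/*
	 * One segment has just moved from clean to dirty; record the new
	 * clean count before searching for a successor below.
	 */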
	lfs_sb_setlastseg(fs, lfs_sb_getcurseg(fs));
	lfs_sb_setcurseg(fs, lfs_sb_getnextseg(fs));
	skip_inval = 1;
	for (sn = curseg = lfs_dtosn(fs, lfs_sb_getcurseg(fs)) + lfs_sb_getinterleave(fs);;) {
		sn = (sn + 1) % lfs_sb_getnseg(fs);

		if (sn == curseg) {
			if (skip_inval)
				skip_inval = 0;
			else
				panic("lfs_nextseg: no clean segments");
		}
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & (SEGUSE_DIRTY | (skip_inval ? SEGUSE_INVAL : 0));
		/* Check SEGUSE_EMPTY as we go along */
		if (isdirty && sup->su_nbytes == 0 &&
		    !(sup->su_flags & SEGUSE_EMPTY))
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
		else
			brelse(bp, 0);

		if (!isdirty)
			break;
	}
	if (skip_inval == 0)
		lfs_unset_inval_all(fs);

	++fs->lfs_nactive;
	lfs_sb_setnextseg(fs, lfs_sntod(fs, sn));
	if (lfs_dostats) {
		++lfs_stats.segsused;
	}
}
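
/*
 * Allocate and initialize the header and I/O buffer for a cluster
 * write.  The component buffers are collected in cl->bpp and released
 * by lfs_cluster_aiodone once the single combined write completes.
 */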
static struct buf *
lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr,
    int n)
{
	struct lfs_cluster *cl;
	struct buf **bpp, *bp;

	ASSERT_SEGLOCK(fs);
	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
	memset(cl, 0, sizeof(*cl));
	cl->fs = fs;
	cl->bpp = bpp;
	cl->bufcount = 0;
	cl->bufsize = 0;

	/* If this segment is being written synchronously, note that */
	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
		cl->flags |= LFS_CL_SYNC;
		cl->seg = fs->lfs_sp;
		++cl->seg->seg_iocount;
	}

	/* Get an empty buffer header, or maybe one with something on it */
	bp = getiobuf(vp, true);
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = addr;
	bp->b_iodone = lfs_cluster_callback;
	bp->b_private = cl;

	return bp;
}
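
/*
 * Write the blocks gathered for a partial segment: account them in the
 * segment usage table, checksum the data and the summary, and push the
 * blocks out to disk in CHUNKSIZE clusters.
 */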
int
lfs_writeseg(struct lfs *fs, struct segment *sp)
{
	struct buf **bpp, *bp, *cbp, *newbp, *unbusybp;
	SEGUSE *sup;
	SEGSUM *ssp;
	int i;
	int do_again, nblocks, byteoffset;
	size_t el_size;
	struct lfs_cluster *cl;
	u_short ninos;
	struct vnode *devvp;
	char *p = NULL;
	struct vnode *vp;
	int32_t *daddrp;	/* XXX ondisk32 */
	int changed;
	u_int32_t sum;
	size_t sumstart;
#ifdef DEBUG
	FINFO *fip;
	int findex;
#endif

	ASSERT_SEGLOCK(fs);

	ssp = (SEGSUM *)sp->segsum;

	/*
	 * If there are no buffers other than the segment summary to write,
	 * don't do anything.  If we are at the end of a dirop sequence,
	 * however, write the empty segment summary anyway, to help out the
	 * roll-forward agent.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1) {
		if ((lfs_ss_getflags(fs, ssp) & (SS_DIROP | SS_CONT)) != SS_DIROP)
			return 0;
	}
	/* Note if partial segment is being written by the cleaner */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_ss_setflags(fs, ssp, lfs_ss_getflags(fs, ssp) | SS_CLEAN);

	/* Note if we are writing to reclaim */
	if (sp->seg_flags & SEGM_RECLAIM) {
		lfs_ss_setflags(fs, ssp, lfs_ss_getflags(fs, ssp) | SS_RECLAIM);
		lfs_ss_setreclino(fs, ssp, fs->lfs_reclino);
	}

	devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
		if ((*bpp)->b_vp != devvp) {
			sup->su_nbytes += (*bpp)->b_bcount;
			DLOG((DLOG_SU, "seg %" PRIu32 " += %ld for ino %d"
			    " lbn %" PRId64 " db 0x%" PRIx64 "\n",
			    sp->seg_number, (*bpp)->b_bcount,
			    VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
			    (*bpp)->b_blkno));
		}
	}

#ifdef DEBUG
	/* Check for zero-length and zero-version FINFO entries. */
	fip = SEGSUM_FINFOBASE(fs, ssp);
	for (findex = 0; findex < lfs_ss_getnfinfo(fs, ssp); findex++) {
		KDASSERT(lfs_fi_getnblocks(fs, fip) > 0);
		KDASSERT(lfs_fi_getversion(fs, fip) > 0);
		fip = NEXT_FINFO(fs, fip);
	}
#endif /* DEBUG */
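
	/* Account the inode blocks and the summary block as well. */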
	ninos = (lfs_ss_getninos(fs, ssp) + LFS_INOPB(fs) - 1) / LFS_INOPB(fs);
	DLOG((DLOG_SU, "seg %d += %d for %d inodes\n",
	    sp->seg_number,
	    lfs_ss_getninos(fs, ssp) * DINOSIZE(fs),
	    lfs_ss_getninos(fs, ssp)));
	sup->su_nbytes += lfs_ss_getninos(fs, ssp) * DINOSIZE(fs);
	/* sup->su_nbytes += lfs_sb_getsumsize(fs); */
	if (lfs_sb_getversion(fs) == 1)
		sup->su_olastmod = time_second;
	else
		sup->su_lastmod = time_second;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	lfs_sb_subavail(fs, lfs_btofsb(fs, lfs_sb_getsumsize(fs)));
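
	/*
	 * If the Ifile block holding this segment's usage entry was not
	 * itself gathered into this write (B_GATHERED), report that the
	 * caller needs to write again.
	 */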
	do_again = !(bp->b_flags & B_GATHERED);
	LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp); /* Ifile */
	/*
	 * Mark blocks B_BUSY, to prevent them from being changed between
	 * the checksum computation and the actual write.
	 *
	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
	 * there are any, replace them with copies that have UNASSIGNED
	 * instead.
	 */
	mutex_enter(&bufcache_lock);
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		++bpp;
		bp = *bpp;
		if (bp->b_iodone != NULL) {	/* UBC or malloced buffer */
			bp->b_cflags |= BC_BUSY;
			continue;
		}

		while (bp->b_cflags & BC_BUSY) {
			DLOG((DLOG_SEG, "lfs_writeseg: avoiding potential"
			    " data summary corruption for ino %d, lbn %"
			    PRId64 "\n",
			    VTOI(bp->b_vp)->i_number, bp->b_lblkno));
			bp->b_cflags |= BC_WANTED;
			cv_wait(&bp->b_busy, &bufcache_lock);
		}
		bp->b_cflags |= BC_BUSY;
		mutex_exit(&bufcache_lock);
		unbusybp = NULL;

		/*
		 * Check and replace indirect block UNWRITTEN bogosity.
		 * XXX See comment in lfs_writefile.
		 */
		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
		    lfs_dino_getblocks(fs, VTOI(bp->b_vp)->i_din) !=
		    VTOI(bp->b_vp)->i_lfs_effnblks) {
			DLOG((DLOG_VNODE, "lfs_writeseg: cleansing ino %d (%jd != %d)\n",
			    VTOI(bp->b_vp)->i_number,
			    (intmax_t)VTOI(bp->b_vp)->i_lfs_effnblks,
			    lfs_dino_getblocks(fs, VTOI(bp->b_vp)->i_din)));
			/* Make a copy we'll make changes to */
			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
			    bp->b_bcount, LFS_NB_IBLOCK);
			newbp->b_blkno = bp->b_blkno;
			memcpy(newbp->b_data, bp->b_data,
			    newbp->b_bcount);

			changed = 0;
			/* XXX ondisk32 */
			for (daddrp = (int32_t *)(newbp->b_data);
			     daddrp < (int32_t *)((char *)newbp->b_data +
						  newbp->b_bcount); daddrp++) {
				if (*daddrp == UNWRITTEN) {
					++changed;
					*daddrp = 0;
				}
			}
			/*
			 * Get rid of the old buffer.  Don't mark it clean,
			 * though, if it still has dirty data on it.
			 */
			if (changed) {
				DLOG((DLOG_SEG, "lfs_writeseg: replacing UNWRITTEN(%d):"
				    " bp = %p newbp = %p\n", changed, bp,
				    newbp));
				*bpp = newbp;
				bp->b_flags &= ~B_GATHERED;
				bp->b_error = 0;
				if (bp->b_iodone != NULL) {
					DLOG((DLOG_SEG, "lfs_writeseg: "
					    "indir bp should not be B_CALL\n"));
					biodone(bp);
					bp = NULL;
				} else {
					/* Still on free list, leave it there */
					unbusybp = bp;
					/*
					 * We have to re-decrement lfs_avail
					 * since this block is going to come
					 * back around to us in the next
					 * segment.
					 */
					lfs_sb_subavail(fs,
					    lfs_btofsb(fs, bp->b_bcount));
				}
			} else {
				lfs_freebuf(fs, newbp);
			}
		}
		mutex_enter(&bufcache_lock);
		if (unbusybp != NULL) {
			unbusybp->b_cflags &= ~BC_BUSY;
			if (unbusybp->b_cflags & BC_WANTED)
				cv_broadcast(&bp->b_busy);
		}
	}
	mutex_exit(&bufcache_lock);
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 */
	sum = 0;
	if (lfs_sb_getversion(fs) == 1)
		el_size = sizeof(u_long);
	else
		el_size = sizeof(u_int32_t);
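	/*
	 * Note that only the first el_size bytes of each block actually
	 * enter the data checksum.
	 */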
	for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
		++bpp;
		/* Loop through gop_write cluster blocks */
		for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
		     byteoffset += lfs_sb_getbsize(fs)) {
#ifdef LFS_USE_B_INVAL
			if (((*bpp)->b_cflags & BC_INVAL) != 0 &&
			    (*bpp)->b_iodone != NULL) {
				if (copyin((void *)(*bpp)->b_saveaddr +
					   byteoffset, dp, el_size)) {
					panic("lfs_writeseg: copyin failed [1]:"
					    " ino %d blk %" PRId64,
					    VTOI((*bpp)->b_vp)->i_number,
					    (*bpp)->b_lblkno);
				}
			} else
#endif /* LFS_USE_B_INVAL */
			{
				sum = lfs_cksum_part((char *)
				    (*bpp)->b_data + byteoffset, el_size, sum);
			}
		}
	}
	if (lfs_sb_getversion(fs) == 1)
		lfs_ss_setocreate(fs, ssp, time_second);
	else {
		lfs_ss_setcreate(fs, ssp, time_second);
		lfs_sb_addserial(fs, 1);
		lfs_ss_setserial(fs, ssp, lfs_sb_getserial(fs));
		lfs_ss_setident(fs, ssp, lfs_sb_getident(fs));
	}
	lfs_ss_setdatasum(fs, ssp, lfs_cksum_fold(sum));
	sumstart = lfs_ss_getsumstart(fs);
	lfs_ss_setsumsum(fs, ssp, cksum((char *)ssp + sumstart,
	    lfs_sb_getsumsize(fs) - sumstart));
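
	/*
	 * The inode blocks and the segment summary are metadata: charge
	 * them against lfs_bfree and track them in lfs_dmeta.
	 */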
	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, (lfs_btofsb(fs, ninos * lfs_sb_getibsize(fs)) +
	    lfs_btofsb(fs, lfs_sb_getsumsize(fs))));
	lfs_sb_adddmeta(fs, (lfs_btofsb(fs, ninos * lfs_sb_getibsize(fs)) +
	    lfs_btofsb(fs, lfs_sb_getsumsize(fs))));
	mutex_exit(&lfs_lock);
	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we cluster the buffers into a
	 * chunk and write the chunk.  MAXPHYS is the largest size I/O
	 * devices can handle; use that for the size of the chunks.
	 *
	 * Blocks that are already clusters (from GOP_WRITE), however, we
	 * don't bother to copy into other clusters.
	 */

#define CHUNKSIZE MAXPHYS

	if (devvp == NULL)
		panic("devvp is NULL");
	for (bpp = sp->bpp, i = nblocks; i;) {
		cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
		cl = cbp->b_private;

		cbp->b_flags |= B_ASYNC;
		cbp->b_cflags |= BC_BUSY;
		cbp->b_bcount = 0;

#if defined(DEBUG) && defined(DIAGNOSTIC)
		if (bpp - sp->bpp > (lfs_sb_getsumsize(fs) - SEGSUM_SIZE(fs))
		    / sizeof(int32_t)) {
			panic("lfs_writeseg: real bpp overwrite");
		}
		if (bpp - sp->bpp > lfs_segsize(fs) / lfs_sb_getfsize(fs)) {
			panic("lfs_writeseg: theoretical bpp overwrite");
		}
#endif

		/*
		 * Construct the cluster.
		 */
		mutex_enter(&lfs_lock);
		++fs->lfs_iocount;
		mutex_exit(&lfs_lock);
		while (i && cbp->b_bcount < CHUNKSIZE) {
			bp = *bpp;

			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
				break;
			if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
				break;

			/* Clusters from GOP_WRITE are expedited */
			if (bp->b_bcount > lfs_sb_getbsize(fs)) {
				if (cbp->b_bcount > 0)
					/* Put in its own buffer */
					break;
				else {
					cbp->b_data = bp->b_data;
				}
			} else if (cbp->b_bcount == 0) {
				p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
				    LFS_NB_CLUSTER);
				cl->flags |= LFS_CL_MALLOC;
			}
#ifdef DIAGNOSTIC
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, bp->b_blkno +
					      btodb(bp->b_bcount - 1))) !=
			    sp->seg_number) {
				printf("blk size %d daddr %" PRIx64
				    " not in seg %d\n",
				    bp->b_bcount, bp->b_blkno,
				    sp->seg_number);
				panic("segment overwrite");
			}
#endif

#ifdef LFS_USE_B_INVAL
			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if ((bp->b_cflags & BC_INVAL) != 0 &&
			    bp->b_iodone != NULL) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: "
					    "copyin failed [2]");
			} else
#endif /* LFS_USE_B_INVAL */
			if (cl->flags & LFS_CL_MALLOC) {
				/* copy data into our cluster. */
				memcpy(p, bp->b_data, bp->b_bcount);
				p += bp->b_bcount;
			}

			cbp->b_bcount += bp->b_bcount;
			cl->bufsize += bp->b_bcount;

			bp->b_flags &= ~B_READ;
			bp->b_error = 0;
			cl->bpp[cl->bufcount++] = bp;

			vp = bp->b_vp;
			mutex_enter(&bufcache_lock);
			mutex_enter(vp->v_interlock);
			bp->b_oflags &= ~(BO_DELWRI | BO_DONE);
			reassignbuf(bp, vp);
			vp->v_numoutput++;
			mutex_exit(vp->v_interlock);
			mutex_exit(&bufcache_lock);

			bpp++;
			i--;
		}
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			BIO_SETPRIO(cbp, BPRIO_TIMECRITICAL);
		else
			BIO_SETPRIO(cbp, BPRIO_TIMELIMITED);
		mutex_enter(devvp->v_interlock);
		devvp->v_numoutput++;
		mutex_exit(devvp->v_interlock);
		VOP_STRATEGY(devvp, cbp);
		curlwp->l_ru.ru_oublock++;
	}
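
	/*
	 * Each cluster issued above took a reference on fs->lfs_iocount;
	 * lfs_cluster_aiodone drops it when the write completes.
	 */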
	if (lfs_dostats) {
		++lfs_stats.psegwrites;
		lfs_stats.blocktot += nblocks - 1;
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			++lfs_stats.psyncwrites;
		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
			++lfs_stats.pcleanwrites;
			lfs_stats.cleanblocks += nblocks - 1;
		}
	}

	return (lfs_initseg(fs) || do_again);
}
void
lfs_writesuper(struct lfs *fs, daddr_t daddr)
{
	struct buf *bp;
	struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;
	int s;

	ASSERT_MAYBE_SEGLOCK(fs);
#ifdef DIAGNOSTIC
	if (fs->lfs_is64) {
		KASSERT(fs->lfs_dlfs_u.u_64.dlfs_magic == LFS64_MAGIC);
	} else {
		KASSERT(fs->lfs_dlfs_u.u_32.dlfs_magic == LFS_MAGIC);
	}
#endif
	/*
	 * If we can write one superblock while another is in
	 * progress, we risk not having a complete checkpoint if we crash.
	 * So, block here if a superblock write is in progress.
	 */
	mutex_enter(&lfs_lock);
	s = splbio();
	while (fs->lfs_sbactive) {
		mtsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0,
		    &lfs_lock);
	}
	fs->lfs_sbactive = daddr;
	splx(s);
	mutex_exit(&lfs_lock);

	/* Set timestamp of this version of the superblock */
	if (lfs_sb_getversion(fs) == 1)
		lfs_sb_setotstamp(fs, time_second);
	lfs_sb_settstamp(fs, time_second);

	/* The next chunk of code relies on this assumption */
	CTASSERT(sizeof(struct dlfs) == sizeof(struct dlfs64));

	/* Checksum the superblock and copy it into a buffer. */
	lfs_sb_setcksum(fs, lfs_sb_cksum(fs));
	bp = lfs_newbuf(fs, devvp,
	    LFS_FSBTODB(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
	memcpy(bp->b_data, &fs->lfs_dlfs_u, sizeof(struct dlfs));
	memset((char *)bp->b_data + sizeof(struct dlfs), 0,
	    LFS_SBPAD - sizeof(struct dlfs));

	bp->b_cflags |= BC_BUSY;
	bp->b_flags = (bp->b_flags & ~B_READ) | B_ASYNC;
	bp->b_oflags &= ~(BO_DONE | BO_DELWRI);
	bp->b_error = 0;
	bp->b_iodone = lfs_supercallback;

	if (fs->lfs_sp != NULL && fs->lfs_sp->seg_flags & SEGM_SYNC)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
	curlwp->l_ru.ru_oublock++;

	mutex_enter(devvp->v_interlock);
	devvp->v_numoutput++;
	mutex_exit(devvp->v_interlock);

	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	mutex_exit(&lfs_lock);
	VOP_STRATEGY(devvp, bp);
}
/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
int
lfs_match_fake(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return LFS_IS_MALLOC_BUF(bp);
}

#if 0
int
lfs_match_real(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
}
#endif

int
lfs_match_data(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 0);
}

int
lfs_match_dindir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 1);
}

int
lfs_match_tindir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - ULFS_NDADDR) % LFS_NINDIR(fs) == 2);
}
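
/*
 * Free a buffer created by lfs_newbuf once its i/o has completed.
 */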
static void
lfs_free_aiodone(struct buf *bp)
{
	struct lfs *fs;

	KERNEL_LOCK(1, curlwp);
	fs = bp->b_private;
	ASSERT_NO_SEGLOCK(fs);
	lfs_freebuf(fs, bp);
	KERNEL_UNLOCK_LAST(curlwp);
}
static void
lfs_super_aiodone(struct buf *bp)
{
	struct lfs *fs;

	KERNEL_LOCK(1, curlwp);
	fs = bp->b_private;
	ASSERT_NO_SEGLOCK(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_sbactive = 0;
	if (--fs->lfs_iocount <= 1)
		wakeup(&fs->lfs_iocount);
	wakeup(&fs->lfs_sbactive);
	mutex_exit(&lfs_lock);
	lfs_freebuf(fs, bp);
	KERNEL_UNLOCK_LAST(curlwp);
}
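
/*
 * Cluster write completion: hand each component buffer back to the
 * buffer cache or free it, update the owning inode's dirty state, and
 * drop the i/o references taken in lfs_writeseg.
 */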
static void
lfs_cluster_aiodone(struct buf *bp)
{
	struct lfs_cluster *cl;
	struct lfs *fs;
	struct buf *tbp, *fbp;
	struct vnode *vp, *devvp, *ovp;
	struct inode *ip;
	int error;

	KERNEL_LOCK(1, curlwp);

	error = bp->b_error;
	cl = bp->b_private;
	fs = cl->fs;
	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
	ASSERT_NO_SEGLOCK(fs);

	/* Put the pages back, and release the buffer */
	while (cl->bufcount--) {
		tbp = cl->bpp[cl->bufcount];
		KASSERT(tbp->b_cflags & BC_BUSY);
		if (error) {
			tbp->b_error = error;
		}

		/*
		 * We're done with tbp.  If it has not been re-dirtied since
		 * the cluster was written, free it.  Otherwise, keep it on
		 * the locked list to be written again.
		 */
		vp = tbp->b_vp;

		tbp->b_flags &= ~B_GATHERED;

		LFS_BCLEAN_LOG(fs, tbp);

		mutex_enter(&bufcache_lock);
		if (tbp->b_iodone == NULL) {
			KASSERT(tbp->b_flags & B_LOCKED);
			bremfree(tbp);
			if (vp) {
				mutex_enter(vp->v_interlock);
				reassignbuf(tbp, vp);
				mutex_exit(vp->v_interlock);
			}
			tbp->b_flags |= B_ASYNC; /* for biodone */
		}

		if (((tbp->b_flags | tbp->b_oflags) &
		    (B_LOCKED | BO_DELWRI)) == B_LOCKED)
			LFS_UNLOCK_BUF(tbp);

		if (tbp->b_oflags & BO_DONE) {
			DLOG((DLOG_SEG, "blk %d biodone already (flags %lx)\n",
			    cl->bufcount, (long)tbp->b_flags));
		}

		if (tbp->b_iodone != NULL && !LFS_IS_MALLOC_BUF(tbp)) {
			/*
			 * A buffer from the page daemon.
			 * We use the same iodone as it does,
			 * so we must manually disassociate its
			 * buffers from the vp.
			 */
			if ((ovp = tbp->b_vp) != NULL) {
				/* This is just silly */
				mutex_enter(ovp->v_interlock);
				brelvp(tbp);
				mutex_exit(ovp->v_interlock);
				tbp->b_vp = vp;
				tbp->b_objlock = vp->v_interlock;
			}
			/* Put it back the way it was */
			tbp->b_flags |= B_ASYNC;
			/* Master buffers have BC_AGE */
			if (tbp->b_private == tbp)
				tbp->b_cflags |= BC_AGE;
		}
		mutex_exit(&bufcache_lock);

		biodone(tbp);

		/*
		 * If this is the last block for this vnode, but
		 * there are other blocks on its dirty list,
		 * set IN_MODIFIED/IN_CLEANING depending on what
		 * sort of block.  Only do this for our mount point,
		 * not for, e.g., inode blocks that are attached to
		 * the devvp.
		 * XXX KS - Shouldn't we set *both* if both types
		 * of blocks are present (traverse the dirty list?)
		 */
		mutex_enter(vp->v_interlock);
		mutex_enter(&lfs_lock);
		if (vp != devvp && vp->v_numoutput == 0 &&
		    (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
			ip = VTOI(vp);
			DLOG((DLOG_SEG, "lfs_cluster_aiodone: mark ino %d\n",
			    ip->i_number));
			if (LFS_IS_MALLOC_BUF(fbp))
				LFS_SET_UINO(ip, IN_CLEANING);
			else
				LFS_SET_UINO(ip, IN_MODIFIED);
		}
		cv_broadcast(&vp->v_cv);
		mutex_exit(&lfs_lock);
		mutex_exit(vp->v_interlock);
	}

	/* Fix up the cluster buffer, and release it */
	if (cl->flags & LFS_CL_MALLOC)
		lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
	putiobuf(bp);

	/* Note i/o done */
	if (cl->flags & LFS_CL_SYNC) {
		if (--cl->seg->seg_iocount == 0)
			wakeup(&cl->seg->seg_iocount);
	}
	mutex_enter(&lfs_lock);
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_cluster_aiodone: zero iocount");
#endif
	if (--fs->lfs_iocount <= 1)
		wakeup(&fs->lfs_iocount);
	mutex_exit(&lfs_lock);

	KERNEL_UNLOCK_LAST(curlwp);

	pool_put(&fs->lfs_bpppool, cl->bpp);
	cl->bpp = NULL;
	pool_put(&fs->lfs_clpool, cl);
}
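
/*
 * The b_iodone callbacks below run at biodone time; they defer the
 * real completion work to the aiodone workqueue via
 * lfs_generic_callback.
 */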
static void
lfs_generic_callback(struct buf *bp, void (*aiodone)(struct buf *))
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

static void
lfs_cluster_callback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_cluster_aiodone);
}

void
lfs_supercallback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_super_aiodone);
}

/*
 * The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_free_aiodone);
}
/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */

static void
lfs_shellsort(struct lfs *fs,
    struct buf **bp_array, union lfs_blocks *lb_array,
    int nmemb, int size)
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	int incr, *incrp, t1, t2;
	struct buf *bp_temp;

#ifdef DEBUG
	incr = 0;
	for (t1 = 0; t1 < nmemb; t1++) {
		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
			if (lfs_blocks_get(fs, lb_array, incr++) != bp_array[t1]->b_lblkno + t2) {
				/* dump before panic */
				printf("lfs_shellsort: nmemb=%d, size=%d\n",
				    nmemb, size);
				incr = 0;
				for (t1 = 0; t1 < nmemb; t1++) {
					const struct buf *bp = bp_array[t1];

					printf("bp[%d]: lbn=%" PRIu64 ", size=%"
					    PRIu64 "\n", t1,
					    (uint64_t)bp->b_lblkno,
					    (uint64_t)bp->b_bcount);
					printf("lbns:");
					for (t2 = 0; t2 * size < bp->b_bcount;
					    t2++) {
						printf(" %jd",
						    (intmax_t)lfs_blocks_get(fs, lb_array, incr++));
					}
					printf("\n");
				}
				panic("lfs_shellsort: inconsistent input");
			}
		}
	}
#endif

	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				if ((u_int64_t)bp_array[t2]->b_lblkno >
				    (u_int64_t)bp_array[t2 + incr]->b_lblkno) {
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;

	/* Reform the list of logical blocks */
	incr = 0;
	for (t1 = 0; t1 < nmemb; t1++) {
		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
			lfs_blocks_set(fs, lb_array, incr++,
			    bp_array[t1]->b_lblkno + t2);
		}
	}
}
/*
 * Set up an FINFO entry for a new file.  The fip pointer is assumed to
 * point at uninitialized space.
 */
void
lfs_acquire_finfo(struct lfs *fs, ino_t ino, int vers)
{
	struct segment *sp = fs->lfs_sp;
	SEGSUM *ssp;

	KASSERT(vers > 0);

	if (sp->seg_bytes_left < lfs_sb_getbsize(fs) ||
	    sp->sum_bytes_left < FINFOSIZE(fs) + LFS_BLKPTRSIZE(fs))
		(void) lfs_writeseg(fs, fs->lfs_sp);

	sp->sum_bytes_left -= FINFOSIZE(fs);
	ssp = (SEGSUM *)sp->segsum;
	lfs_ss_setnfinfo(fs, ssp, lfs_ss_getnfinfo(fs, ssp) + 1);
	lfs_fi_setnblocks(fs, sp->fip, 0);
	lfs_fi_setino(fs, sp->fip, ino);
	lfs_fi_setversion(fs, sp->fip, vers);
}
/*
 * Release the FINFO entry, either clearing out an unused entry or
 * advancing us to the next available entry.
 */
void
lfs_release_finfo(struct lfs *fs)
{
	struct segment *sp = fs->lfs_sp;
	SEGSUM *ssp;

	if (lfs_fi_getnblocks(fs, sp->fip) != 0) {
		sp->fip = NEXT_FINFO(fs, sp->fip);
		lfs_blocks_fromfinfo(fs, &sp->start_lbp, sp->fip);
	} else {
		/* XXX shouldn't this update sp->fip? */
		sp->sum_bytes_left += FINFOSIZE(fs);
		ssp = (SEGSUM *)sp->segsum;
		lfs_ss_setnfinfo(fs, ssp, lfs_ss_getnfinfo(fs, ssp) - 1);