/*	$NetBSD: lfs_balloc.c,v 1.87 2015/09/01 06:08:37 dholland Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.87 2015/09/01 06:08:37 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/tree.h>
#include <sys/trace.h>
#include <sys/kauth.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_quotacommon.h>
#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **,
    kauth_cred_t);
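
/*
 * Count of page-backed blocks currently registered as "write pending";
 * incremented in lfs_register_block() and decremented in
 * lfs_do_deregister() below.
 */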
u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ulfs_bmap to the special value
 * UNASSIGNED == -1, as in the historical ULFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
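
/*
 * lfs_balloc: make sure the block holding [startoffset, startoffset + iosize)
 * exists and is accounted for, creating any missing indirect blocks along
 * the way; if bpp is non-NULL, also return a buffer for the block through it.
 */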
/* VOP_BWRITE ULFS_NIADDR+2 times */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize, kauth_cred_t cred,
    int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= lfs_sb_getbsize(fs));
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address, or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block or zero out its contents.  If
	 * it did not, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.  If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < lfs_sb_getbsize(fs) && osize > 0) {
			if ((error = lfs_fragextend(vp, osize,
			    lfs_sb_getbsize(fs), lastblock,
			    (bpp ? &bp : NULL), cred)))
				return error;
			ip->i_size = (lastblock + 1) * lfs_sb_getbsize(fs);
			lfs_dino_setsize(fs, ip->i_din, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it the correct
	 * size, or it already exists and contains some fragments and
	 * we may need to extend it.
	 */
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			lfs_sb_subbfree(fs, frags);
			mutex_exit(&lfs_lock);
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error = lfs_fragextend(vp, osize, nsize,
				    lbn, (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}
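
	/*
	 * We are writing a full-sized block (possibly reached through
	 * indirect blocks): ask ulfs_bmaparray for its current disk address
	 * and for the chain of indirect blocks, if any, that leads to it.
	 */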
	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return error;

	KASSERT(daddr <= LFS_MAX_DADDR(fs));

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	frags = fs->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		lfs_sb_subbfree(fs, bcount);
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}
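
	/*
	 * The block has never been accounted for: mark its slot in the
	 * inode (or parent indirect block) UNWRITTEN and create any
	 * indirect blocks that are still missing on the way down.
	 */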
	if (daddr == UNASSIGNED) {
		if (num > 0 && lfs_dino_getib(fs, ip->i_din, indirs[0].in_off) == 0) {
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = lfs_dino_getib(fs, ip->i_din, indirs[0].in_off);
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    lfs_sb_getbsize(fs), 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to keep
				 * the accounting straight.
				 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;

				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (bpp == NULL)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		switch (num) {
		    case 0:
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
			break;
		    case 1:
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off, UNWRITTEN);
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs),
			    B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == lfs_sb_getbsize(fs))
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_flags |= B_READ;
			bp->b_blkno = daddr;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}
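
/*
 * lfs_fragextend: extend a fragment in place from osize to nsize bytes,
 * charging the additional fragments to the inode and filesystem accounting;
 * if a buffer is requested through bpp, enlarge it and zero the new bytes.
 */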
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn,
    struct buf **bpp, kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * sleep.
	 */
    top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif

	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */
	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
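
	/* Enlarge the buffer itself and zero the newly added bytes. */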
	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0, (u_int)(nsize - osize));
	}

    out:
	if (bpp)
		rw_exit(&fs->lfs_fraglock);
	return (error);
}
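
/*
 * Comparator for the per-inode splay tree of "write pending" logical
 * block numbers; see lfs_register_block() below.
 */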
static int
lge(struct lbnentry *a, struct lbnentry *b)
{
	return a->lbn - b->lbn;
}

SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);

SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);

/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, lfs_btofsb(fs, 1 << lfs_sb_getbshift(fs)));

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	mutex_enter(&lfs_lock);
	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
		mutex_exit(&lfs_lock);
		/* Already there */
		pool_put(&lfs_lbnentry_pool, lbp);
		return;
	}

	fs->lfs_favail += lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	++locked_fakequeue_count;
	lfs_subsys_pages += lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);
}
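
/*
 * Common code for dropping a "write pending" entry: remove it from the
 * inode's splay tree and back out the accounting that lfs_register_block()
 * added.
 */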
static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	mutex_enter(&lfs_lock);
	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
	if (fs->lfs_favail > lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs))))
		fs->lfs_favail -= lfs_btofsb(fs, (1 << lfs_sb_getbshift(fs)));
	fs->lfs_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	lfs_subsys_pages -= lfs_sb_getbsize(fs) >> PAGE_SHIFT;
	mutex_exit(&lfs_lock);

	pool_put(&lfs_lbnentry_pool, lbp);
}
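
/*
 * Forget a single "write pending" entry for the given logical block,
 * if one is registered.
 */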
void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	struct lbnentry tmp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;
	tmp.lbn = lbn;
	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, ip, lbp);
}
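
/*
 * Drop every "write pending" entry registered for this vnode.
 */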
void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp, *nlbp;
	struct lfs_splay *hd;
	struct lfs *fs;
	struct inode *ip;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hd = &ip->i_lfs_lbtree;

	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
		lfs_do_deregister(fs, ip, lbp);
	}
}