/*	$NetBSD: ulfs_inode.c,v 1.11 2015/09/01 06:16:59 dholland Exp $	*/
/*  from NetBSD: ufs_inode.c,v 1.89 2013/01/22 09:39:18 dholland Exp  */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_inode.c	8.9 (Berkeley) 5/14/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.11 2015/09/01 06:16:59 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_lfs.h"
#include "opt_quota.h"
#include "opt_wapbl.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#ifdef LFS_DIRHASH
#include <ufs/lfs/ulfs_dirhash.h>
#endif
#ifdef LFS_EXTATTR
#include <ufs/lfs/ulfs_extattr.h>
#endif

#include <uvm/uvm.h>
/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ulfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *transmp;
	mode_t mode;
	int error = 0;

	transmp = vp->v_mount;
	fstrans_start(transmp, FSTRANS_LAZY);
	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef LFS_EXTATTR
		ulfs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			error = lfs_truncate(vp, (off_t)0, 0, NOCRED);
		}
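		/*
		 * Give back the inode's quota allocation, if quotas are
		 * enabled; lfs_chkiq() with a count of -1 releases one
		 * inode from the charged totals.
		 */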
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
		(void)lfs_chkiq(ip, -1, NOCRED, 0);
#endif
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_omode = mode;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		/*
		 * Defer final inode free and update to ulfs_reclaim().
		 */
	}

	if (ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		lfs_update(vp, NULL, NULL, 0);
	}

out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);
	VOP_UNLOCK(vp);
	fstrans_done(transmp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ulfs_reclaim(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	if (prtactive && vp->v_usecount > 1)
		vprint("ulfs_reclaim: pushing active", vp);

	/* XXX: do we really need two of these? */
	/* note: originally the first was inside a wapbl txn */
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);

	/*
	 * Remove the inode from the vnode cache.
	 */
	vcache_remove(vp->v_mount, &ip->i_number, sizeof(ip->i_number));

	if (ip->i_devvp) {
		vrele(ip->i_devvp);
		ip->i_devvp = 0;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	ulfsquota_free(ip);
#endif
#ifdef LFS_DIRHASH
	if (ip->i_dirhash != NULL)
		ulfsdirhash_free(ip);
#endif
	return (0);
}

/*
 * allocate a range of blocks in a file.
 * after this function returns, any page entirely contained within the range
 * will map to invalid data and thus must be overwritten before it is made
 * accessible to others.
 */

int
ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
		    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	genfs_node_wrlock(vp);
	mutex_enter(uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP | PGO_GLOCKHELD);
	if (error) {
		genfs_node_unlock(vp);
		goto out;
	}

	/*
	 * now allocate the range.
	 */

	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * if the allocation succeeded, clear PG_CLEAN on all the pages
	 * and clear PG_RDONLY on any pages that are now fully backed
	 * by disk blocks.  if the allocation failed, we do not invalidate
	 * the pages since they might have already existed and been dirty,
	 * in which case we need to keep them around.  if we created the pages,
	 * they will be clean and read-only, and leaving such pages
	 * in the cache won't cause any problems.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		if (!error) {
			if (off <= pagestart + (i << PAGE_SHIFT) &&
			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
				pgs[i]->flags &= ~PG_RDONLY;
			}
			pgs[i]->flags &= ~PG_CLEAN;
		}
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
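	/*
	 * The pages were returned busy by VOP_GETPAGES above; release them
	 * now that their flags have been updated.
	 */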
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

 out:
	kmem_free(pgs, pgssize);
	return error;
}