1 /* $NetBSD: lfs_vnops.c,v 1.293 2015/09/21 01:24:23 dholland Exp $ */
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant@hhhh.org>.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 * Copyright (c) 1986, 1989, 1991, 1993, 1995
33 * The Regents of the University of California. All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
59 * @(#)lfs_vnops.c 8.13 (Berkeley) 6/10/95
62 /* from NetBSD: ufs_vnops.c,v 1.213 2013/06/08 05:47:02 kardel Exp */
64 * Copyright (c) 2008 The NetBSD Foundation, Inc.
65 * All rights reserved.
67 * This code is derived from software contributed to The NetBSD Foundation
68 * by Wasabi Systems, Inc.
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions
73 * 1. Redistributions of source code must retain the above copyright
74 * notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce the above copyright
76 * notice, this list of conditions and the following disclaimer in the
77 * documentation and/or other materials provided with the distribution.
79 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
80 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
81 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
82 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
83 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
84 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
85 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
86 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
87 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
88 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
89 * POSSIBILITY OF SUCH DAMAGE.
92 * Copyright (c) 1982, 1986, 1989, 1993, 1995
93 * The Regents of the University of California. All rights reserved.
94 * (c) UNIX System Laboratories, Inc.
95 * All or some portions of this file are derived from material licensed
96 * to the University of California by American Telephone and Telegraph
97 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
98 * the permission of UNIX System Laboratories, Inc.
100 * Redistribution and use in source and binary forms, with or without
101 * modification, are permitted provided that the following conditions
103 * 1. Redistributions of source code must retain the above copyright
104 * notice, this list of conditions and the following disclaimer.
105 * 2. Redistributions in binary form must reproduce the above copyright
106 * notice, this list of conditions and the following disclaimer in the
107 * documentation and/or other materials provided with the distribution.
108 * 3. Neither the name of the University nor the names of its contributors
109 * may be used to endorse or promote products derived from this software
110 * without specific prior written permission.
112 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
113 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
114 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
115 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
116 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
117 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
118 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
119 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
120 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
121 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
122 * SUCH DAMAGE.
124 * @(#)ufs_vnops.c 8.28 (Berkeley) 7/31/95
127 #include <sys/cdefs.h>
128 __KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.293 2015/09/21 01:24:23 dholland Exp $");
131 #include "opt_compat_netbsd.h"
132 #include "opt_uvm_page_trkown.h"
135 #include <sys/param.h>
136 #include <sys/systm.h>
137 #include <sys/namei.h>
138 #include <sys/resourcevar.h>
139 #include <sys/kernel.h>
140 #include <sys/file.h>
141 #include <sys/stat.h>
143 #include <sys/proc.h>
144 #include <sys/mount.h>
145 #include <sys/vnode.h>
146 #include <sys/pool.h>
147 #include <sys/signalvar.h>
148 #include <sys/kauth.h>
149 #include <sys/syslog.h>
150 #include <sys/fstrans.h>
152 #include <miscfs/fifofs/fifo.h>
153 #include <miscfs/genfs/genfs.h>
154 #include <miscfs/specfs/specdev.h>
156 #include <ufs/lfs/ulfs_inode.h>
157 #include <ufs/lfs/ulfsmount.h>
158 #include <ufs/lfs/ulfs_bswap.h>
159 #include <ufs/lfs/ulfs_extern.h>
162 #include <uvm/uvm_pmap.h>
163 #include <uvm/uvm_stat.h>
164 #include <uvm/uvm_pager.h>
166 #include <ufs/lfs/lfs.h>
167 #include <ufs/lfs/lfs_accessors.h>
168 #include <ufs/lfs/lfs_kernel.h>
169 #include <ufs/lfs/lfs_extern.h>
/*
 * Pid of the LFS writer daemon; its address also serves as a wakeup(9)
 * channel (see the wakeup() calls in lfs_fsync and lfs_set_dirop).
 */
extern pid_t lfs_writer_daemon;

/*
 * When nonzero, FSYNC_LAZY requests are ignored instead of queueing the
 * vnode for trickle sync (see lfs_fsync).
 */
int lfs_ignore_lazy_sync = 1;

/* Forward declarations for the extended-attribute vnode operations. */
static int lfs_openextattr(void *v);
static int lfs_closeextattr(void *v);
static int lfs_getextattr(void *v);
static int lfs_setextattr(void *v);
static int lfs_listextattr(void *v);
static int lfs_deleteextattr(void *v);
181 /* Global vfs data structures for lfs. */
182 int (**lfs_vnodeop_p
)(void *);
183 const struct vnodeopv_entry_desc lfs_vnodeop_entries
[] = {
184 { &vop_default_desc
, vn_default_error
},
185 { &vop_lookup_desc
, ulfs_lookup
}, /* lookup */
186 { &vop_create_desc
, lfs_create
}, /* create */
187 { &vop_whiteout_desc
, ulfs_whiteout
}, /* whiteout */
188 { &vop_mknod_desc
, lfs_mknod
}, /* mknod */
189 { &vop_open_desc
, ulfs_open
}, /* open */
190 { &vop_close_desc
, lfs_close
}, /* close */
191 { &vop_access_desc
, ulfs_access
}, /* access */
192 { &vop_getattr_desc
, lfs_getattr
}, /* getattr */
193 { &vop_setattr_desc
, lfs_setattr
}, /* setattr */
194 { &vop_read_desc
, lfs_read
}, /* read */
195 { &vop_write_desc
, lfs_write
}, /* write */
196 { &vop_fallocate_desc
, genfs_eopnotsupp
}, /* fallocate */
197 { &vop_fdiscard_desc
, genfs_eopnotsupp
}, /* fdiscard */
198 { &vop_ioctl_desc
, ulfs_ioctl
}, /* ioctl */
199 { &vop_fcntl_desc
, lfs_fcntl
}, /* fcntl */
200 { &vop_poll_desc
, ulfs_poll
}, /* poll */
201 { &vop_kqfilter_desc
, genfs_kqfilter
}, /* kqfilter */
202 { &vop_revoke_desc
, ulfs_revoke
}, /* revoke */
203 { &vop_mmap_desc
, lfs_mmap
}, /* mmap */
204 { &vop_fsync_desc
, lfs_fsync
}, /* fsync */
205 { &vop_seek_desc
, ulfs_seek
}, /* seek */
206 { &vop_remove_desc
, lfs_remove
}, /* remove */
207 { &vop_link_desc
, lfs_link
}, /* link */
208 { &vop_rename_desc
, lfs_rename
}, /* rename */
209 { &vop_mkdir_desc
, lfs_mkdir
}, /* mkdir */
210 { &vop_rmdir_desc
, lfs_rmdir
}, /* rmdir */
211 { &vop_symlink_desc
, lfs_symlink
}, /* symlink */
212 { &vop_readdir_desc
, ulfs_readdir
}, /* readdir */
213 { &vop_readlink_desc
, ulfs_readlink
}, /* readlink */
214 { &vop_abortop_desc
, ulfs_abortop
}, /* abortop */
215 { &vop_inactive_desc
, lfs_inactive
}, /* inactive */
216 { &vop_reclaim_desc
, lfs_reclaim
}, /* reclaim */
217 { &vop_lock_desc
, ulfs_lock
}, /* lock */
218 { &vop_unlock_desc
, ulfs_unlock
}, /* unlock */
219 { &vop_bmap_desc
, ulfs_bmap
}, /* bmap */
220 { &vop_strategy_desc
, lfs_strategy
}, /* strategy */
221 { &vop_print_desc
, ulfs_print
}, /* print */
222 { &vop_islocked_desc
, ulfs_islocked
}, /* islocked */
223 { &vop_pathconf_desc
, ulfs_pathconf
}, /* pathconf */
224 { &vop_advlock_desc
, ulfs_advlock
}, /* advlock */
225 { &vop_bwrite_desc
, lfs_bwrite
}, /* bwrite */
226 { &vop_getpages_desc
, lfs_getpages
}, /* getpages */
227 { &vop_putpages_desc
, lfs_putpages
}, /* putpages */
228 { &vop_openextattr_desc
, lfs_openextattr
}, /* openextattr */
229 { &vop_closeextattr_desc
, lfs_closeextattr
}, /* closeextattr */
230 { &vop_getextattr_desc
, lfs_getextattr
}, /* getextattr */
231 { &vop_setextattr_desc
, lfs_setextattr
}, /* setextattr */
232 { &vop_listextattr_desc
, lfs_listextattr
}, /* listextattr */
233 { &vop_deleteextattr_desc
, lfs_deleteextattr
}, /* deleteextattr */
236 const struct vnodeopv_desc lfs_vnodeop_opv_desc
=
237 { &lfs_vnodeop_p
, lfs_vnodeop_entries
};
239 int (**lfs_specop_p
)(void *);
240 const struct vnodeopv_entry_desc lfs_specop_entries
[] = {
241 { &vop_default_desc
, vn_default_error
},
242 { &vop_lookup_desc
, spec_lookup
}, /* lookup */
243 { &vop_create_desc
, spec_create
}, /* create */
244 { &vop_mknod_desc
, spec_mknod
}, /* mknod */
245 { &vop_open_desc
, spec_open
}, /* open */
246 { &vop_close_desc
, lfsspec_close
}, /* close */
247 { &vop_access_desc
, ulfs_access
}, /* access */
248 { &vop_getattr_desc
, lfs_getattr
}, /* getattr */
249 { &vop_setattr_desc
, lfs_setattr
}, /* setattr */
250 { &vop_read_desc
, ulfsspec_read
}, /* read */
251 { &vop_write_desc
, ulfsspec_write
}, /* write */
252 { &vop_fallocate_desc
, spec_fallocate
}, /* fallocate */
253 { &vop_fdiscard_desc
, spec_fdiscard
}, /* fdiscard */
254 { &vop_ioctl_desc
, spec_ioctl
}, /* ioctl */
255 { &vop_fcntl_desc
, ulfs_fcntl
}, /* fcntl */
256 { &vop_poll_desc
, spec_poll
}, /* poll */
257 { &vop_kqfilter_desc
, spec_kqfilter
}, /* kqfilter */
258 { &vop_revoke_desc
, spec_revoke
}, /* revoke */
259 { &vop_mmap_desc
, spec_mmap
}, /* mmap */
260 { &vop_fsync_desc
, spec_fsync
}, /* fsync */
261 { &vop_seek_desc
, spec_seek
}, /* seek */
262 { &vop_remove_desc
, spec_remove
}, /* remove */
263 { &vop_link_desc
, spec_link
}, /* link */
264 { &vop_rename_desc
, spec_rename
}, /* rename */
265 { &vop_mkdir_desc
, spec_mkdir
}, /* mkdir */
266 { &vop_rmdir_desc
, spec_rmdir
}, /* rmdir */
267 { &vop_symlink_desc
, spec_symlink
}, /* symlink */
268 { &vop_readdir_desc
, spec_readdir
}, /* readdir */
269 { &vop_readlink_desc
, spec_readlink
}, /* readlink */
270 { &vop_abortop_desc
, spec_abortop
}, /* abortop */
271 { &vop_inactive_desc
, lfs_inactive
}, /* inactive */
272 { &vop_reclaim_desc
, lfs_reclaim
}, /* reclaim */
273 { &vop_lock_desc
, ulfs_lock
}, /* lock */
274 { &vop_unlock_desc
, ulfs_unlock
}, /* unlock */
275 { &vop_bmap_desc
, spec_bmap
}, /* bmap */
276 { &vop_strategy_desc
, spec_strategy
}, /* strategy */
277 { &vop_print_desc
, ulfs_print
}, /* print */
278 { &vop_islocked_desc
, ulfs_islocked
}, /* islocked */
279 { &vop_pathconf_desc
, spec_pathconf
}, /* pathconf */
280 { &vop_advlock_desc
, spec_advlock
}, /* advlock */
281 { &vop_bwrite_desc
, vn_bwrite
}, /* bwrite */
282 { &vop_getpages_desc
, spec_getpages
}, /* getpages */
283 { &vop_putpages_desc
, spec_putpages
}, /* putpages */
284 { &vop_openextattr_desc
, lfs_openextattr
}, /* openextattr */
285 { &vop_closeextattr_desc
, lfs_closeextattr
}, /* closeextattr */
286 { &vop_getextattr_desc
, lfs_getextattr
}, /* getextattr */
287 { &vop_setextattr_desc
, lfs_setextattr
}, /* setextattr */
288 { &vop_listextattr_desc
, lfs_listextattr
}, /* listextattr */
289 { &vop_deleteextattr_desc
, lfs_deleteextattr
}, /* deleteextattr */
292 const struct vnodeopv_desc lfs_specop_opv_desc
=
293 { &lfs_specop_p
, lfs_specop_entries
};
295 int (**lfs_fifoop_p
)(void *);
296 const struct vnodeopv_entry_desc lfs_fifoop_entries
[] = {
297 { &vop_default_desc
, vn_default_error
},
298 { &vop_lookup_desc
, vn_fifo_bypass
}, /* lookup */
299 { &vop_create_desc
, vn_fifo_bypass
}, /* create */
300 { &vop_mknod_desc
, vn_fifo_bypass
}, /* mknod */
301 { &vop_open_desc
, vn_fifo_bypass
}, /* open */
302 { &vop_close_desc
, lfsfifo_close
}, /* close */
303 { &vop_access_desc
, ulfs_access
}, /* access */
304 { &vop_getattr_desc
, lfs_getattr
}, /* getattr */
305 { &vop_setattr_desc
, lfs_setattr
}, /* setattr */
306 { &vop_read_desc
, ulfsfifo_read
}, /* read */
307 { &vop_write_desc
, ulfsfifo_write
}, /* write */
308 { &vop_fallocate_desc
, vn_fifo_bypass
}, /* fallocate */
309 { &vop_fdiscard_desc
, vn_fifo_bypass
}, /* fdiscard */
310 { &vop_ioctl_desc
, vn_fifo_bypass
}, /* ioctl */
311 { &vop_fcntl_desc
, ulfs_fcntl
}, /* fcntl */
312 { &vop_poll_desc
, vn_fifo_bypass
}, /* poll */
313 { &vop_kqfilter_desc
, vn_fifo_bypass
}, /* kqfilter */
314 { &vop_revoke_desc
, vn_fifo_bypass
}, /* revoke */
315 { &vop_mmap_desc
, vn_fifo_bypass
}, /* mmap */
316 { &vop_fsync_desc
, vn_fifo_bypass
}, /* fsync */
317 { &vop_seek_desc
, vn_fifo_bypass
}, /* seek */
318 { &vop_remove_desc
, vn_fifo_bypass
}, /* remove */
319 { &vop_link_desc
, vn_fifo_bypass
}, /* link */
320 { &vop_rename_desc
, vn_fifo_bypass
}, /* rename */
321 { &vop_mkdir_desc
, vn_fifo_bypass
}, /* mkdir */
322 { &vop_rmdir_desc
, vn_fifo_bypass
}, /* rmdir */
323 { &vop_symlink_desc
, vn_fifo_bypass
}, /* symlink */
324 { &vop_readdir_desc
, vn_fifo_bypass
}, /* readdir */
325 { &vop_readlink_desc
, vn_fifo_bypass
}, /* readlink */
326 { &vop_abortop_desc
, vn_fifo_bypass
}, /* abortop */
327 { &vop_inactive_desc
, lfs_inactive
}, /* inactive */
328 { &vop_reclaim_desc
, lfs_reclaim
}, /* reclaim */
329 { &vop_lock_desc
, ulfs_lock
}, /* lock */
330 { &vop_unlock_desc
, ulfs_unlock
}, /* unlock */
331 { &vop_bmap_desc
, vn_fifo_bypass
}, /* bmap */
332 { &vop_strategy_desc
, vn_fifo_bypass
}, /* strategy */
333 { &vop_print_desc
, ulfs_print
}, /* print */
334 { &vop_islocked_desc
, ulfs_islocked
}, /* islocked */
335 { &vop_pathconf_desc
, vn_fifo_bypass
}, /* pathconf */
336 { &vop_advlock_desc
, vn_fifo_bypass
}, /* advlock */
337 { &vop_bwrite_desc
, lfs_bwrite
}, /* bwrite */
338 { &vop_putpages_desc
, vn_fifo_bypass
}, /* putpages */
339 { &vop_openextattr_desc
, lfs_openextattr
}, /* openextattr */
340 { &vop_closeextattr_desc
, lfs_closeextattr
}, /* closeextattr */
341 { &vop_getextattr_desc
, lfs_getextattr
}, /* getextattr */
342 { &vop_setextattr_desc
, lfs_setextattr
}, /* setextattr */
343 { &vop_listextattr_desc
, lfs_listextattr
}, /* listextattr */
344 { &vop_deleteextattr_desc
, lfs_deleteextattr
}, /* deleteextattr */
347 const struct vnodeopv_desc lfs_fifoop_opv_desc
=
348 { &lfs_fifoop_p
, lfs_fifoop_entries
};
350 #define LFS_READWRITE
351 #include <ufs/lfs/ulfs_readwrite.c>
355 * Synch an open file.
361 struct vop_fsync_args
/* {
368 struct vnode
*vp
= ap
->a_vp
;
370 struct inode
*ip
= VTOI(vp
);
371 struct lfs
*fs
= ip
->i_lfs
;
373 /* If we're mounted read-only, don't try to sync. */
377 /* If a removed vnode is being cleaned, no need to sync here. */
378 if ((ap
->a_flags
& FSYNC_RECLAIM
) != 0 && ip
->i_mode
== 0)
382 * Trickle sync simply adds this vnode to the pager list, as if
383 * the pagedaemon had requested a pageout.
385 if (ap
->a_flags
& FSYNC_LAZY
) {
386 if (lfs_ignore_lazy_sync
== 0) {
387 mutex_enter(&lfs_lock
);
388 if (!(ip
->i_flags
& IN_PAGING
)) {
389 ip
->i_flags
|= IN_PAGING
;
390 TAILQ_INSERT_TAIL(&fs
->lfs_pchainhd
, ip
,
393 wakeup(&lfs_writer_daemon
);
394 mutex_exit(&lfs_lock
);
400 * If a vnode is bring cleaned, flush it out before we try to
401 * reuse it. This prevents the cleaner from writing files twice
402 * in the same partial segment, causing an accounting underflow.
404 if (ap
->a_flags
& FSYNC_RECLAIM
&& ip
->i_flags
& IN_CLEANING
) {
408 wait
= (ap
->a_flags
& FSYNC_WAIT
);
410 mutex_enter(vp
->v_interlock
);
411 error
= VOP_PUTPAGES(vp
, trunc_page(ap
->a_offlo
),
412 round_page(ap
->a_offhi
),
413 PGO_CLEANIT
| (wait
? PGO_SYNCIO
: 0));
414 if (error
== EAGAIN
) {
415 mutex_enter(&lfs_lock
);
416 mtsleep(&fs
->lfs_availsleep
, PCATCH
| PUSER
,
417 "lfs_fsync", hz
/ 100 + 1, &lfs_lock
);
418 mutex_exit(&lfs_lock
);
420 } while (error
== EAGAIN
);
424 if ((ap
->a_flags
& FSYNC_DATAONLY
) == 0)
425 error
= lfs_update(vp
, NULL
, NULL
, wait
? UPDATE_WAIT
: 0);
427 if (error
== 0 && ap
->a_flags
& FSYNC_CACHE
) {
429 error
= VOP_IOCTL(ip
->i_devvp
, DIOCCACHESYNC
, &l
, FWRITE
,
432 if (wait
&& !VPISEMPTY(vp
))
433 LFS_SET_UINO(ip
, IN_MODIFIED
);
439 * Take IN_ADIROP off, then call ulfs_inactive.
442 lfs_inactive(void *v
)
444 struct vop_inactive_args
/* {
448 lfs_unmark_vnode(ap
->a_vp
);
451 * The Ifile is only ever inactivated on unmount.
452 * Streamline this process by not giving it more dirty blocks.
454 if (VTOI(ap
->a_vp
)->i_number
== LFS_IFILE_INUM
) {
455 mutex_enter(&lfs_lock
);
456 LFS_CLR_UINO(VTOI(ap
->a_vp
), IN_ALLMOD
);
457 mutex_exit(&lfs_lock
);
458 VOP_UNLOCK(ap
->a_vp
);
464 * This might happen on unmount.
465 * XXX If it happens at any other time, it should be a panic.
467 if (ap
->a_vp
->v_uflag
& VU_DIROP
) {
468 struct inode
*ip
= VTOI(ap
->a_vp
);
469 printf("lfs_inactive: inactivating VU_DIROP? ino = %d\n", (int)ip
->i_number
);
471 #endif /* DIAGNOSTIC */
473 return ulfs_inactive(v
);
477 lfs_set_dirop(struct vnode
*dvp
, struct vnode
*vp
)
482 KASSERT(VOP_ISLOCKED(dvp
));
483 KASSERT(vp
== NULL
|| VOP_ISLOCKED(vp
));
485 fs
= VTOI(dvp
)->i_lfs
;
487 ASSERT_NO_SEGLOCK(fs
);
489 * LFS_NRESERVE calculates direct and indirect blocks as well
490 * as an inode block; an overestimate in most cases.
492 if ((error
= lfs_reserve(fs
, dvp
, vp
, LFS_NRESERVE(fs
))) != 0)
496 mutex_enter(&lfs_lock
);
497 if (fs
->lfs_dirops
== 0) {
498 mutex_exit(&lfs_lock
);
499 lfs_check(dvp
, LFS_UNUSED_LBN
, 0);
500 mutex_enter(&lfs_lock
);
502 while (fs
->lfs_writer
) {
503 error
= mtsleep(&fs
->lfs_dirops
, (PRIBIO
+ 1) | PCATCH
,
504 "lfs_sdirop", 0, &lfs_lock
);
505 if (error
== EINTR
) {
506 mutex_exit(&lfs_lock
);
510 if (lfs_dirvcount
> LFS_MAX_DIROP
&& fs
->lfs_dirops
== 0) {
511 wakeup(&lfs_writer_daemon
);
512 mutex_exit(&lfs_lock
);
517 if (lfs_dirvcount
> LFS_MAX_DIROP
) {
518 DLOG((DLOG_DIROP
, "lfs_set_dirop: sleeping with dirops=%d, "
519 "dirvcount=%d\n", fs
->lfs_dirops
, lfs_dirvcount
));
520 if ((error
= mtsleep(&lfs_dirvcount
,
521 PCATCH
| PUSER
| PNORELOCK
, "lfs_maxdirop", 0,
529 /* fs->lfs_doifile = 1; */ /* XXX why? --ks */
530 mutex_exit(&lfs_lock
);
532 /* Hold a reference so SET_ENDOP will be happy */
543 lfs_reserve(fs
, dvp
, vp
, -LFS_NRESERVE(fs
));
548 * Opposite of lfs_set_dirop... mostly. For now at least must call
549 * UNMARK_VNODE(dvp) explicitly first. (XXX: clean that up)
552 lfs_unset_dirop(struct lfs
*fs
, struct vnode
*dvp
, const char *str
)
554 mutex_enter(&lfs_lock
);
556 if (!fs
->lfs_dirops
) {
557 if (fs
->lfs_nadirop
) {
558 panic("lfs_unset_dirop: %s: no dirops but "
562 wakeup(&fs
->lfs_writer
);
563 mutex_exit(&lfs_lock
);
564 lfs_check(dvp
, LFS_UNUSED_LBN
, 0);
566 mutex_exit(&lfs_lock
);
568 lfs_reserve(fs
, dvp
, NULL
, -LFS_NRESERVE(fs
));
572 lfs_mark_vnode(struct vnode
*vp
)
574 struct inode
*ip
= VTOI(vp
);
575 struct lfs
*fs
= ip
->i_lfs
;
577 mutex_enter(&lfs_lock
);
578 if (!(ip
->i_flag
& IN_ADIROP
)) {
579 if (!(vp
->v_uflag
& VU_DIROP
)) {
580 mutex_exit(&lfs_lock
);
582 mutex_enter(&lfs_lock
);
585 TAILQ_INSERT_TAIL(&fs
->lfs_dchainhd
, ip
, i_lfs_dchain
);
586 vp
->v_uflag
|= VU_DIROP
;
589 ip
->i_flag
&= ~IN_CDIROP
;
590 ip
->i_flag
|= IN_ADIROP
;
592 KASSERT(vp
->v_uflag
& VU_DIROP
);
593 mutex_exit(&lfs_lock
);
597 lfs_unmark_vnode(struct vnode
*vp
)
599 struct inode
*ip
= VTOI(vp
);
601 mutex_enter(&lfs_lock
);
602 if (ip
&& (ip
->i_flag
& IN_ADIROP
)) {
603 KASSERT(vp
->v_uflag
& VU_DIROP
);
604 --ip
->i_lfs
->lfs_nadirop
;
605 ip
->i_flag
&= ~IN_ADIROP
;
607 mutex_exit(&lfs_lock
);
613 struct vop_symlink_v3_args
/* {
615 struct vnode **a_vpp;
616 struct componentname *a_cnp;
621 struct vnode
*dvp
, **vpp
;
623 struct ulfs_lookup_results
*ulr
;
624 ssize_t len
; /* XXX should be size_t */
630 KASSERT(vpp
!= NULL
);
631 KASSERT(*vpp
== NULL
);
632 KASSERT(ap
->a_vap
->va_type
== VLNK
);
634 /* XXX should handle this material another way */
635 ulr
= &VTOI(ap
->a_dvp
)->i_crap
;
636 ULFS_CHECK_CRAPCOUNTER(VTOI(ap
->a_dvp
));
638 fs
= VFSTOULFS(dvp
->v_mount
)->um_lfs
;
639 ASSERT_NO_SEGLOCK(fs
);
644 error
= lfs_set_dirop(dvp
, NULL
);
648 fstrans_start(dvp
->v_mount
, FSTRANS_SHARED
);
649 error
= ulfs_makeinode(ap
->a_vap
, dvp
, ulr
, vpp
, ap
->a_cnp
);
654 VN_KNOTE(ap
->a_dvp
, NOTE_WRITE
);
657 len
= strlen(ap
->a_target
);
658 if (len
< ip
->i_lfs
->um_maxsymlinklen
) {
659 memcpy((char *)SHORTLINK(ip
), ap
->a_target
, len
);
661 DIP_ASSIGN(ip
, size
, len
);
662 uvm_vnp_setsize(*vpp
, ip
->i_size
);
663 ip
->i_flag
|= IN_CHANGE
| IN_UPDATE
;
664 if ((*vpp
)->v_mount
->mnt_flag
& MNT_RELATIME
)
665 ip
->i_flag
|= IN_ACCESS
;
667 error
= ulfs_bufio(UIO_WRITE
, *vpp
, ap
->a_target
, len
, (off_t
)0,
668 IO_NODELOCKED
| IO_JOURNALLOCKED
, ap
->a_cnp
->cn_cred
, NULL
,
677 fstrans_done(dvp
->v_mount
);
680 /* XXX: is it even possible for the symlink to get MARK'd? */
685 lfs_unset_dirop(fs
, dvp
, "symlink");
694 struct vop_mknod_v3_args
/* {
696 struct vnode **a_vpp;
697 struct componentname *a_cnp;
701 struct vnode
*dvp
, **vpp
;
706 struct ulfs_lookup_results
*ulr
;
712 KASSERT(vpp
!= NULL
);
713 KASSERT(*vpp
== NULL
);
715 /* XXX should handle this material another way */
716 ulr
= &VTOI(dvp
)->i_crap
;
717 ULFS_CHECK_CRAPCOUNTER(VTOI(dvp
));
719 fs
= VFSTOULFS(dvp
->v_mount
)->um_lfs
;
720 ASSERT_NO_SEGLOCK(fs
);
725 error
= lfs_set_dirop(dvp
, NULL
);
729 fstrans_start(ap
->a_dvp
->v_mount
, FSTRANS_SHARED
);
730 error
= ulfs_makeinode(vap
, dvp
, ulr
, vpp
, ap
->a_cnp
);
732 /* Either way we're done with the dirop at this point */
735 lfs_unset_dirop(fs
, dvp
, "mknod");
737 * XXX this is where this used to be (though inside some evil
738 * macros) but it clearly should be moved further down.
739 * - dholland 20140515
744 fstrans_done(ap
->a_dvp
->v_mount
);
749 VN_KNOTE(dvp
, NOTE_WRITE
);
752 ip
->i_flag
|= IN_ACCESS
| IN_CHANGE
| IN_UPDATE
;
755 * Call fsync to write the vnode so that we don't have to deal with
756 * flushing it when it's marked VU_DIROP or reclaiming.
758 * XXX KS - If we can't flush we also can't call vgone(), so must
759 * return. But, that leaves this vnode in limbo, also not good.
760 * Can this ever happen (barring hardware failure)?
762 if ((error
= VOP_FSYNC(*vpp
, NOCRED
, FSYNC_WAIT
, 0, 0)) != 0) {
763 panic("lfs_mknod: couldn't fsync (ino %llu)",
764 (unsigned long long)ino
);
765 /* return (error); */
768 fstrans_done(ap
->a_dvp
->v_mount
);
775 * Create a regular file
780 struct vop_create_v3_args
/* {
782 struct vnode **a_vpp;
783 struct componentname *a_cnp;
787 struct vnode
*dvp
, **vpp
;
789 struct ulfs_lookup_results
*ulr
;
796 KASSERT(vpp
!= NULL
);
797 KASSERT(*vpp
== NULL
);
799 /* XXX should handle this material another way */
800 ulr
= &VTOI(dvp
)->i_crap
;
801 ULFS_CHECK_CRAPCOUNTER(VTOI(dvp
));
803 fs
= VFSTOULFS(dvp
->v_mount
)->um_lfs
;
804 ASSERT_NO_SEGLOCK(fs
);
809 error
= lfs_set_dirop(dvp
, NULL
);
813 fstrans_start(dvp
->v_mount
, FSTRANS_SHARED
);
814 error
= ulfs_makeinode(vap
, dvp
, ulr
, vpp
, ap
->a_cnp
);
816 fstrans_done(dvp
->v_mount
);
819 fstrans_done(dvp
->v_mount
);
820 VN_KNOTE(dvp
, NOTE_WRITE
);
830 lfs_unset_dirop(fs
, dvp
, "create");
839 struct vop_mkdir_v3_args
/* {
841 struct vnode **a_vpp;
842 struct componentname *a_cnp;
846 struct vnode
*dvp
, *tvp
, **vpp
;
847 struct inode
*dp
, *ip
;
848 struct componentname
*cnp
;
850 struct ulfs_lookup_results
*ulr
;
865 KASSERT(vap
->va_type
== VDIR
);
866 KASSERT(vpp
!= NULL
);
867 KASSERT(*vpp
== NULL
);
869 /* XXX should handle this material another way */
871 ULFS_CHECK_CRAPCOUNTER(dp
);
873 fs
= VFSTOULFS(dvp
->v_mount
)->um_lfs
;
874 ASSERT_NO_SEGLOCK(fs
);
878 dirblksiz
= fs
->um_dirblksiz
;
879 /* XXX dholland 20150911 I believe this to be true, but... */
880 //KASSERT(dirblksiz == LFS_DIRBLKSIZ);
882 error
= lfs_set_dirop(dvp
, NULL
);
886 fstrans_start(dvp
->v_mount
, FSTRANS_SHARED
);
888 if ((nlink_t
)dp
->i_nlink
>= LINK_MAX
) {
894 * Must simulate part of ulfs_makeinode here to acquire the inode,
895 * but not have it entered in the parent directory. The entry is
896 * made later after writing "." and ".." entries.
898 error
= vcache_new(dvp
->v_mount
, dvp
, vap
, cnp
->cn_cred
, ap
->a_vpp
);
902 error
= vn_lock(*ap
->a_vpp
, LK_EXCLUSIVE
);
912 ip
->i_flag
|= IN_ACCESS
| IN_CHANGE
| IN_UPDATE
;
914 DIP_ASSIGN(ip
, nlink
, 2);
915 if (cnp
->cn_flags
& ISWHITEOUT
) {
916 ip
->i_flags
|= UF_OPAQUE
;
917 DIP_ASSIGN(ip
, flags
, ip
->i_flags
);
921 * Bump link count in parent directory to reflect work done below.
924 DIP_ASSIGN(dp
, nlink
, dp
->i_nlink
);
925 dp
->i_flag
|= IN_CHANGE
;
926 if ((error
= lfs_update(dvp
, NULL
, NULL
, UPDATE_DIROP
)) != 0)
930 * Initialize directory with "." and "..". This used to use a
931 * static template but that adds moving parts for very little
934 if ((error
= lfs_balloc(tvp
, (off_t
)0, dirblksiz
, cnp
->cn_cred
,
935 B_CLRBUF
, &bp
)) != 0)
937 ip
->i_size
= dirblksiz
;
938 DIP_ASSIGN(ip
, size
, dirblksiz
);
939 ip
->i_flag
|= IN_ACCESS
| IN_CHANGE
| IN_UPDATE
;
940 uvm_vnp_setsize(tvp
, ip
->i_size
);
944 lfs_dir_setino(fs
, dirp
, ip
->i_number
);
945 lfs_dir_setreclen(fs
, dirp
, LFS_DIRECTSIZ(fs
, 1));
946 lfs_dir_settype(fs
, dirp
, LFS_DT_DIR
);
947 lfs_dir_setnamlen(fs
, dirp
, 1);
948 lfs_copydirname(fs
, lfs_dir_nameptr(fs
, dirp
), ".", 1,
949 LFS_DIRECTSIZ(fs
, 1));
950 dirp
= LFS_NEXTDIR(fs
, dirp
);
952 lfs_dir_setino(fs
, dirp
, dp
->i_number
);
953 lfs_dir_setreclen(fs
, dirp
, dirblksiz
- LFS_DIRECTSIZ(fs
, 1));
954 lfs_dir_settype(fs
, dirp
, LFS_DT_DIR
);
955 lfs_dir_setnamlen(fs
, dirp
, 2);
956 lfs_copydirname(fs
, lfs_dir_nameptr(fs
, dirp
), "..", 2,
957 dirblksiz
- LFS_DIRECTSIZ(fs
, 1));
960 * Directory set up; now install its entry in the parent directory.
962 if ((error
= VOP_BWRITE(bp
->b_vp
, bp
)) != 0)
964 if ((error
= lfs_update(tvp
, NULL
, NULL
, UPDATE_DIROP
)) != 0) {
967 error
= ulfs_direnter(dvp
, ulr
, tvp
,
968 cnp
, ip
->i_number
, LFS_IFTODT(ip
->i_mode
), bp
);
971 VN_KNOTE(dvp
, NOTE_WRITE
| NOTE_LINK
);
975 DIP_ASSIGN(dp
, nlink
, dp
->i_nlink
);
976 dp
->i_flag
|= IN_CHANGE
;
978 * No need to do an explicit lfs_truncate here, vrele will
979 * do this for us because we set the link count to 0.
982 DIP_ASSIGN(ip
, nlink
, 0);
983 ip
->i_flag
|= IN_CHANGE
;
984 /* If IN_ADIROP, account for it */
985 lfs_unmark_vnode(tvp
);
990 fstrans_done(dvp
->v_mount
);
997 lfs_unset_dirop(fs
, dvp
, "mkdir");
1006 struct vop_remove_args
/* {
1007 struct vnode *a_dvp;
1009 struct componentname *a_cnp;
1011 struct vnode
*dvp
, *vp
;
1018 if ((error
= lfs_set_dirop(dvp
, vp
)) != 0) {
1026 error
= ulfs_remove(ap
);
1027 if (ip
->i_nlink
== 0)
1028 lfs_orphan(ip
->i_lfs
, ip
->i_number
);
1032 UNMARK_VNODE(ap
->a_vp
);
1034 lfs_unset_dirop(ip
->i_lfs
, dvp
, "remove");
1046 struct vop_rmdir_args
/* {
1047 struct vnodeop_desc *a_desc;
1048 struct vnode *a_dvp;
1050 struct componentname *a_cnp;
1058 if ((error
= lfs_set_dirop(ap
->a_dvp
, ap
->a_vp
)) != 0) {
1059 if (ap
->a_dvp
== vp
)
1066 error
= ulfs_rmdir(ap
);
1067 if (ip
->i_nlink
== 0)
1068 lfs_orphan(ip
->i_lfs
, ip
->i_number
);
1070 UNMARK_VNODE(ap
->a_dvp
);
1072 UNMARK_VNODE(ap
->a_vp
);
1074 lfs_unset_dirop(ip
->i_lfs
, ap
->a_dvp
, "rmdir");
1086 struct vop_link_v2_args
/* {
1087 struct vnode *a_dvp;
1089 struct componentname *a_cnp;
1097 fs
= VFSTOULFS(dvp
->v_mount
)->um_lfs
;
1098 ASSERT_NO_SEGLOCK(fs
);
1099 if (fs
->lfs_ronly
) {
1103 error
= lfs_set_dirop(dvp
, NULL
);
1108 error
= ulfs_link(ap
);
1111 lfs_unset_dirop(fs
, dvp
, "link");
1117 /* XXX hack to avoid calling ITIMES in getattr */
1119 lfs_getattr(void *v
)
1121 struct vop_getattr_args
/* {
1123 struct vattr *a_vap;
1124 kauth_cred_t a_cred;
1126 struct vnode
*vp
= ap
->a_vp
;
1127 struct inode
*ip
= VTOI(vp
);
1128 struct vattr
*vap
= ap
->a_vap
;
1129 struct lfs
*fs
= ip
->i_lfs
;
1131 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
1133 * Copy from inode table
1135 vap
->va_fsid
= ip
->i_dev
;
1136 vap
->va_fileid
= ip
->i_number
;
1137 vap
->va_mode
= ip
->i_mode
& ~LFS_IFMT
;
1138 vap
->va_nlink
= ip
->i_nlink
;
1139 vap
->va_uid
= ip
->i_uid
;
1140 vap
->va_gid
= ip
->i_gid
;
1141 switch (vp
->v_type
) {
1144 vap
->va_rdev
= (dev_t
)lfs_dino_getrdev(fs
, ip
->i_din
);
1147 vap
->va_rdev
= NODEV
;
1150 vap
->va_size
= vp
->v_size
;
1151 vap
->va_atime
.tv_sec
= lfs_dino_getatime(fs
, ip
->i_din
);
1152 vap
->va_atime
.tv_nsec
= lfs_dino_getatimensec(fs
, ip
->i_din
);
1153 vap
->va_mtime
.tv_sec
= lfs_dino_getmtime(fs
, ip
->i_din
);
1154 vap
->va_mtime
.tv_nsec
= lfs_dino_getmtimensec(fs
, ip
->i_din
);
1155 vap
->va_ctime
.tv_sec
= lfs_dino_getctime(fs
, ip
->i_din
);
1156 vap
->va_ctime
.tv_nsec
= lfs_dino_getctimensec(fs
, ip
->i_din
);
1157 vap
->va_flags
= ip
->i_flags
;
1158 vap
->va_gen
= ip
->i_gen
;
1159 /* this doesn't belong here */
1160 if (vp
->v_type
== VBLK
)
1161 vap
->va_blocksize
= BLKDEV_IOSIZE
;
1162 else if (vp
->v_type
== VCHR
)
1163 vap
->va_blocksize
= MAXBSIZE
;
1165 vap
->va_blocksize
= vp
->v_mount
->mnt_stat
.f_iosize
;
1166 vap
->va_bytes
= lfs_fsbtob(fs
, ip
->i_lfs_effnblks
);
1167 vap
->va_type
= vp
->v_type
;
1168 vap
->va_filerev
= ip
->i_modrev
;
1169 fstrans_done(vp
->v_mount
);
1174 * Check to make sure the inode blocks won't choke the buffer
1175 * cache, then call ulfs_setattr as usual.
1178 lfs_setattr(void *v
)
1180 struct vop_setattr_args
/* {
1182 struct vattr *a_vap;
1183 kauth_cred_t a_cred;
1185 struct vnode
*vp
= ap
->a_vp
;
1187 lfs_check(vp
, LFS_UNUSED_LBN
, 0);
1188 return ulfs_setattr(v
);
1192 * Release the block we hold on lfs_newseg wrapping. Called on file close,
1193 * or explicitly from LFCNWRAPGO. Called with the interlock held.
1196 lfs_wrapgo(struct lfs
*fs
, struct inode
*ip
, int waitfor
)
1198 if (fs
->lfs_stoplwp
!= curlwp
)
1201 fs
->lfs_stoplwp
= NULL
;
1202 cv_signal(&fs
->lfs_stopcv
);
1204 KASSERT(fs
->lfs_nowrap
> 0);
1205 if (fs
->lfs_nowrap
<= 0) {
1209 if (--fs
->lfs_nowrap
== 0) {
1210 log(LOG_NOTICE
, "%s: re-enabled log wrap\n",
1211 lfs_sb_getfsmnt(fs
));
1212 wakeup(&fs
->lfs_wrappass
);
1213 lfs_wakeup_cleaner(fs
);
1216 mtsleep(&fs
->lfs_nextsegsleep
, PCATCH
| PUSER
, "segment",
1226 * Update the times on the inode.
1232 struct vop_close_args
/* {
1235 kauth_cred_t a_cred;
1237 struct vnode
*vp
= ap
->a_vp
;
1238 struct inode
*ip
= VTOI(vp
);
1239 struct lfs
*fs
= ip
->i_lfs
;
1241 if ((ip
->i_number
== ULFS_ROOTINO
|| ip
->i_number
== LFS_IFILE_INUM
) &&
1242 fs
->lfs_stoplwp
== curlwp
) {
1243 mutex_enter(&lfs_lock
);
1244 log(LOG_NOTICE
, "lfs_close: releasing log wrap control\n");
1245 lfs_wrapgo(fs
, ip
, 0);
1246 mutex_exit(&lfs_lock
);
1249 if (vp
== ip
->i_lfs
->lfs_ivnode
&&
1250 vp
->v_mount
->mnt_iflag
& IMNT_UNMOUNT
)
1253 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
1254 if (vp
->v_usecount
> 1 && vp
!= ip
->i_lfs
->lfs_ivnode
) {
1255 LFS_ITIMES(ip
, NULL
, NULL
, NULL
);
1257 fstrans_done(vp
->v_mount
);
1262 * Close wrapper for special devices.
1264 * Update the times on the inode then do device close.
1267 lfsspec_close(void *v
)
1269 struct vop_close_args
/* {
1272 kauth_cred_t a_cred;
1279 if (vp
->v_usecount
> 1) {
1280 LFS_ITIMES(ip
, NULL
, NULL
, NULL
);
1282 return (VOCALL (spec_vnodeop_p
, VOFFSET(vop_close
), ap
));
1286 * Close wrapper for fifo's.
1288 * Update the times on the inode then do device close.
1291 lfsfifo_close(void *v
)
1293 struct vop_close_args
/* {
1303 if (ap
->a_vp
->v_usecount
> 1) {
1304 LFS_ITIMES(ip
, NULL
, NULL
, NULL
);
1306 return (VOCALL (fifo_vnodeop_p
, VOFFSET(vop_close
), ap
));
1310 * Reclaim an inode so that it can be used for other purposes.
1314 lfs_reclaim(void *v
)
1316 struct vop_reclaim_args
/* {
1319 struct vnode
*vp
= ap
->a_vp
;
1320 struct inode
*ip
= VTOI(vp
);
1321 struct lfs
*fs
= ip
->i_lfs
;
1325 * The inode must be freed and updated before being removed
1326 * from its hash chain. Other threads trying to gain a hold
1327 * or lock on the inode will be stalled.
1329 if (ip
->i_nlink
<= 0 && (vp
->v_mount
->mnt_flag
& MNT_RDONLY
) == 0)
1330 lfs_vfree(vp
, ip
->i_number
, ip
->i_omode
);
1332 mutex_enter(&lfs_lock
);
1333 LFS_CLR_UINO(ip
, IN_ALLMOD
);
1334 mutex_exit(&lfs_lock
);
1335 if ((error
= ulfs_reclaim(vp
)))
1339 * Take us off the paging and/or dirop queues if we were on them.
1340 * We shouldn't be on them.
1342 mutex_enter(&lfs_lock
);
1343 if (ip
->i_flags
& IN_PAGING
) {
1344 log(LOG_WARNING
, "%s: reclaimed vnode is IN_PAGING\n",
1345 lfs_sb_getfsmnt(fs
));
1346 ip
->i_flags
&= ~IN_PAGING
;
1347 TAILQ_REMOVE(&fs
->lfs_pchainhd
, ip
, i_lfs_pchain
);
1349 if (vp
->v_uflag
& VU_DIROP
) {
1350 panic("reclaimed vnode is VU_DIROP");
1351 vp
->v_uflag
&= ~VU_DIROP
;
1352 TAILQ_REMOVE(&fs
->lfs_dchainhd
, ip
, i_lfs_dchain
);
1354 mutex_exit(&lfs_lock
);
1356 pool_put(&lfs_dinode_pool
, ip
->i_din
);
1357 lfs_deregister_all(vp
);
1358 pool_put(&lfs_inoext_pool
, ip
->inode_ext
.lfs
);
1359 ip
->inode_ext
.lfs
= NULL
;
1360 genfs_node_destroy(vp
);
1361 pool_put(&lfs_inode_pool
, vp
->v_data
);
1367 * Read a block from a storage device.
1369 * Calculate the logical to physical mapping if not done already,
1370 * then call the device strategy routine.
1372 * In order to avoid reading blocks that are in the process of being
1373 * written by the cleaner---and hence are not mutexed by the normal
1374 * buffer cache / page cache mechanisms---check for collisions before
1377 * We inline ulfs_strategy to make sure that the VOP_BMAP occurs *before*
1378 * the active cleaner test.
1380 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
1383 lfs_strategy(void *v
)
1385 struct vop_strategy_args
/* {
1395 int i
, sn
, error
, slept
, loopcount
;
1402 /* lfs uses its strategy routine only for read */
1403 KASSERT(bp
->b_flags
& B_READ
);
1405 if (vp
->v_type
== VBLK
|| vp
->v_type
== VCHR
)
1406 panic("lfs_strategy: spec");
1407 KASSERT(bp
->b_bcount
!= 0);
1408 if (bp
->b_blkno
== bp
->b_lblkno
) {
1409 error
= VOP_BMAP(vp
, bp
->b_lblkno
, NULL
, &bp
->b_blkno
,
1412 bp
->b_error
= error
;
1413 bp
->b_resid
= bp
->b_bcount
;
1417 if ((long)bp
->b_blkno
== -1) /* no valid data */
1420 if ((long)bp
->b_blkno
< 0) { /* block is not on disk */
1421 bp
->b_resid
= bp
->b_bcount
;
1428 mutex_enter(&lfs_lock
);
1429 while (slept
&& fs
->lfs_seglock
) {
1430 mutex_exit(&lfs_lock
);
1432 * Look through list of intervals.
1433 * There will only be intervals to look through
1434 * if the cleaner holds the seglock.
1435 * Since the cleaner is synchronous, we can trust
1436 * the list of intervals to be current.
1438 tbn
= LFS_DBTOFSB(fs
, bp
->b_blkno
);
1439 sn
= lfs_dtosn(fs
, tbn
);
1441 for (i
= 0; i
< fs
->lfs_cleanind
; i
++) {
1442 if (sn
== lfs_dtosn(fs
, fs
->lfs_cleanint
[i
]) &&
1443 tbn
>= fs
->lfs_cleanint
[i
]) {
1445 "lfs_strategy: ino %d lbn %" PRId64
1446 " ind %d sn %d fsb %" PRIx64
1447 " given sn %d fsb %" PRIx64
"\n",
1448 ip
->i_number
, bp
->b_lblkno
, i
,
1449 lfs_dtosn(fs
, fs
->lfs_cleanint
[i
]),
1450 fs
->lfs_cleanint
[i
], sn
, tbn
));
1452 "lfs_strategy: sleeping on ino %d lbn %"
1453 PRId64
"\n", ip
->i_number
, bp
->b_lblkno
));
1454 mutex_enter(&lfs_lock
);
1455 if (LFS_SEGLOCK_HELD(fs
) && fs
->lfs_iocount
) {
1457 * Cleaner can't wait for itself.
1458 * Instead, wait for the blocks
1459 * to be written to disk.
1460 * XXX we need pribio in the test
1463 mtsleep(&fs
->lfs_iocount
,
1464 (PRIBIO
+ 1) | PNORELOCK
,
1465 "clean2", hz
/10 + 1,
1470 } else if (fs
->lfs_seglock
) {
1471 mtsleep(&fs
->lfs_seglock
,
1472 (PRIBIO
+ 1) | PNORELOCK
,
1478 mutex_exit(&lfs_lock
);
1481 mutex_enter(&lfs_lock
);
1482 if (loopcount
> MAXLOOP
) {
1483 printf("lfs_strategy: breaking out of clean2 loop\n");
1487 mutex_exit(&lfs_lock
);
1490 return VOP_STRATEGY(vp
, bp
);
1494 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
1495 * Technically this is a checkpoint (the on-disk state is valid)
1496 * even though we are leaving out all the file data.
1499 lfs_flush_dirops(struct lfs
*fs
)
1501 struct inode
*ip
, *nip
;
1503 extern int lfs_dostats
; /* XXX this does not belong here */
1509 ASSERT_MAYBE_SEGLOCK(fs
);
1510 KASSERT(fs
->lfs_nadirop
== 0);
1515 mutex_enter(&lfs_lock
);
1516 if (TAILQ_FIRST(&fs
->lfs_dchainhd
) == NULL
) {
1517 mutex_exit(&lfs_lock
);
1520 mutex_exit(&lfs_lock
);
1523 ++lfs_stats
.flush_invoked
;
1526 lfs_seglock(fs
, flags
);
1530 * lfs_writevnodes, optimized to get dirops out of the way.
1531 * Only write dirops, and don't flush files' pages, only
1532 * blocks from the directories.
1534 * We don't need to vref these files because they are
1535 * dirops and so hold an extra reference until the
1536 * segunlock clears them of that status.
1538 * We don't need to check for IN_ADIROP because we know that
1539 * no dirops are active.
1542 mutex_enter(&lfs_lock
);
1543 for (ip
= TAILQ_FIRST(&fs
->lfs_dchainhd
); ip
!= NULL
; ip
= nip
) {
1544 nip
= TAILQ_NEXT(ip
, i_lfs_dchain
);
1545 mutex_exit(&lfs_lock
);
1547 mutex_enter(vp
->v_interlock
);
1549 KASSERT((ip
->i_flag
& IN_ADIROP
) == 0);
1550 KASSERT(vp
->v_uflag
& VU_DIROP
);
1551 KASSERT(vdead_check(vp
, VDEAD_NOWAIT
) == 0);
1554 * All writes to directories come from dirops; all
1555 * writes to files' direct blocks go through the page
1556 * cache, which we're not touching. Reads to files
1557 * and/or directories will not be affected by writing
1558 * directory blocks inodes and file inodes. So we don't
1559 * really need to lock.
1561 if (vdead_check(vp
, VDEAD_NOWAIT
) != 0) {
1562 mutex_exit(vp
->v_interlock
);
1563 mutex_enter(&lfs_lock
);
1566 mutex_exit(vp
->v_interlock
);
1568 * waslocked = VOP_ISLOCKED(vp);
1570 if (vp
->v_type
!= VREG
&&
1571 ((ip
->i_flag
& IN_ALLMOD
) || !VPISEMPTY(vp
))) {
1572 error
= lfs_writefile(fs
, sp
, vp
);
1573 if (!VPISEMPTY(vp
) && !WRITEINPROG(vp
) &&
1574 !(ip
->i_flag
& IN_ALLMOD
)) {
1575 mutex_enter(&lfs_lock
);
1576 LFS_SET_UINO(ip
, IN_MODIFIED
);
1577 mutex_exit(&lfs_lock
);
1579 if (error
&& (sp
->seg_flags
& SEGM_SINGLE
)) {
1580 mutex_enter(&lfs_lock
);
1585 KDASSERT(ip
->i_number
!= LFS_IFILE_INUM
);
1586 error
= lfs_writeinode(fs
, sp
, ip
);
1587 mutex_enter(&lfs_lock
);
1588 if (error
&& (sp
->seg_flags
& SEGM_SINGLE
)) {
1594 * We might need to update these inodes again,
1595 * for example, if they have data blocks to write.
1596 * Make sure that after this flush, they are still
1597 * marked IN_MODIFIED so that we don't forget to
1600 /* XXX only for non-directories? --KS */
1601 LFS_SET_UINO(ip
, IN_MODIFIED
);
1603 mutex_exit(&lfs_lock
);
1604 /* We've written all the dirops there are */
1605 ssp
= (SEGSUM
*)sp
->segsum
;
1606 lfs_ss_setflags(fs
, ssp
, lfs_ss_getflags(fs
, ssp
) & ~(SS_CONT
));
1607 lfs_finalize_fs_seguse(fs
);
1608 (void) lfs_writeseg(fs
, sp
);
1615 * Flush all vnodes for which the pagedaemon has requested pageouts.
1616 * Skip over any files that are marked VU_DIROP (since lfs_flush_dirop()
1617 * has just run, this would be an error). If we have to skip a vnode
1618 * for any reason, just skip it; if we have to wait for the cleaner,
1619 * abort. The writer daemon will call us again later.
1622 lfs_flush_pchain(struct lfs
*fs
)
1624 struct inode
*ip
, *nip
;
1626 extern int lfs_dostats
;
1630 ASSERT_NO_SEGLOCK(fs
);
1635 mutex_enter(&lfs_lock
);
1636 if (TAILQ_FIRST(&fs
->lfs_pchainhd
) == NULL
) {
1637 mutex_exit(&lfs_lock
);
1640 mutex_exit(&lfs_lock
);
1642 /* Get dirops out of the way */
1643 if ((error
= lfs_flush_dirops(fs
)) != 0)
1647 ++lfs_stats
.flush_invoked
;
1650 * Inline lfs_segwrite/lfs_writevnodes, but just for pageouts.
1657 * lfs_writevnodes, optimized to clear pageout requests.
1658 * Only write non-dirop files that are in the pageout queue.
1659 * We're very conservative about what we write; we want to be
1662 mutex_enter(&lfs_lock
);
1664 for (ip
= TAILQ_FIRST(&fs
->lfs_pchainhd
); ip
!= NULL
; ip
= nip
) {
1665 struct mount
*mp
= ITOV(ip
)->v_mount
;
1666 ino_t ino
= ip
->i_number
;
1668 nip
= TAILQ_NEXT(ip
, i_lfs_pchain
);
1670 if (!(ip
->i_flags
& IN_PAGING
))
1673 mutex_exit(&lfs_lock
);
1674 if (vcache_get(mp
, &ino
, sizeof(ino
), &vp
) != 0) {
1675 mutex_enter(&lfs_lock
);
1678 if (vn_lock(vp
, LK_EXCLUSIVE
| LK_NOWAIT
) != 0) {
1680 mutex_enter(&lfs_lock
);
1684 mutex_enter(&lfs_lock
);
1685 if ((vp
->v_uflag
& VU_DIROP
) != 0 || vp
->v_type
!= VREG
||
1686 !(ip
->i_flags
& IN_PAGING
)) {
1687 mutex_exit(&lfs_lock
);
1689 mutex_enter(&lfs_lock
);
1692 mutex_exit(&lfs_lock
);
1694 error
= lfs_writefile(fs
, sp
, vp
);
1695 if (!VPISEMPTY(vp
) && !WRITEINPROG(vp
) &&
1696 !(ip
->i_flag
& IN_ALLMOD
)) {
1697 mutex_enter(&lfs_lock
);
1698 LFS_SET_UINO(ip
, IN_MODIFIED
);
1699 mutex_exit(&lfs_lock
);
1701 KDASSERT(ip
->i_number
!= LFS_IFILE_INUM
);
1702 error2
= lfs_writeinode(fs
, sp
, ip
);
1707 if (error
== EAGAIN
|| error2
== EAGAIN
) {
1708 lfs_writeseg(fs
, sp
);
1709 mutex_enter(&lfs_lock
);
1712 mutex_enter(&lfs_lock
);
1714 mutex_exit(&lfs_lock
);
1715 (void) lfs_writeseg(fs
, sp
);
1722 * Conversion for compat.
1725 block_info_from_70(BLOCK_INFO
*bi
, const BLOCK_INFO_70
*bi70
)
1727 bi
->bi_inode
= bi70
->bi_inode
;
1728 bi
->bi_lbn
= bi70
->bi_lbn
;
1729 bi
->bi_daddr
= bi70
->bi_daddr
;
1730 bi
->bi_segcreate
= bi70
->bi_segcreate
;
1731 bi
->bi_version
= bi70
->bi_version
;
1732 bi
->bi_bp
= bi70
->bi_bp
;
1733 bi
->bi_size
= bi70
->bi_size
;
1737 block_info_to_70(BLOCK_INFO_70
*bi70
, const BLOCK_INFO
*bi
)
1739 bi70
->bi_inode
= bi
->bi_inode
;
1740 bi70
->bi_lbn
= bi
->bi_lbn
;
1741 bi70
->bi_daddr
= bi
->bi_daddr
;
1742 bi70
->bi_segcreate
= bi
->bi_segcreate
;
1743 bi70
->bi_version
= bi
->bi_version
;
1744 bi70
->bi_bp
= bi
->bi_bp
;
1745 bi70
->bi_size
= bi
->bi_size
;
1749 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
1754 struct vop_fcntl_args
/* {
1759 kauth_cred_t a_cred;
1762 struct timeval
*tvp
;
1764 BLOCK_INFO_70
*blkiov70
;
1767 int blkcnt
, i
, error
;
1769 struct lfs_fcntl_markv blkvp
;
1770 struct lfs_fcntl_markv_70 blkvp70
;
1779 /* Only respect LFS fcntls on fs root or Ifile */
1780 if (VTOI(ap
->a_vp
)->i_number
!= ULFS_ROOTINO
&&
1781 VTOI(ap
->a_vp
)->i_number
!= LFS_IFILE_INUM
) {
1782 return ulfs_fcntl(v
);
1785 /* Avoid locking a draining lock */
1786 if (ap
->a_vp
->v_mount
->mnt_iflag
& IMNT_UNMOUNT
) {
1790 /* LFS control and monitoring fcntls are available only to root */
1792 if (((ap
->a_command
& 0xff00) >> 8) == 'L' &&
1793 (error
= kauth_authorize_system(l
->l_cred
, KAUTH_SYSTEM_LFS
,
1794 KAUTH_REQ_SYSTEM_LFS_FCNTL
, NULL
, NULL
, NULL
)) != 0)
1797 fs
= VTOI(ap
->a_vp
)->i_lfs
;
1798 fsidp
= &ap
->a_vp
->v_mount
->mnt_stat
.f_fsidx
;
1801 switch ((int)ap
->a_command
) {
1802 case LFCNSEGWAITALL_COMPAT_50
:
1803 case LFCNSEGWAITALL_COMPAT
:
1806 case LFCNSEGWAIT_COMPAT_50
:
1807 case LFCNSEGWAIT_COMPAT
:
1809 struct timeval50
*tvp50
1810 = (struct timeval50
*)ap
->a_data
;
1811 timeval50_to_timeval(tvp50
, &tv
);
1814 goto segwait_common
;
1815 case LFCNSEGWAITALL
:
1819 tvp
= (struct timeval
*)ap
->a_data
;
1821 mutex_enter(&lfs_lock
);
1823 mutex_exit(&lfs_lock
);
1825 error
= lfs_segwait(fsidp
, tvp
);
1827 mutex_enter(&lfs_lock
);
1828 if (--fs
->lfs_sleepers
== 0)
1829 wakeup(&fs
->lfs_sleepers
);
1830 mutex_exit(&lfs_lock
);
1833 case LFCNBMAPV_COMPAT_70
:
1834 case LFCNMARKV_COMPAT_70
:
1835 blkvp70
= *(struct lfs_fcntl_markv_70
*)ap
->a_data
;
1837 blkcnt
= blkvp70
.blkcnt
;
1838 if ((u_int
) blkcnt
> LFS_MARKV_MAXBLKCNT
)
1840 blkiov
= lfs_malloc(fs
, blkcnt
* sizeof(BLOCK_INFO
), LFS_NB_BLKIOV
);
1841 blkiov70
= lfs_malloc(fs
, sizeof(BLOCK_INFO_70
), LFS_NB_BLKIOV
);
1842 for (i
= 0; i
< blkcnt
; i
++) {
1843 error
= copyin(&blkvp70
.blkiov
[i
], blkiov70
,
1846 lfs_free(fs
, blkiov70
, LFS_NB_BLKIOV
);
1847 lfs_free(fs
, blkiov
, LFS_NB_BLKIOV
);
1850 block_info_from_70(&blkiov
[i
], blkiov70
);
1853 mutex_enter(&lfs_lock
);
1855 mutex_exit(&lfs_lock
);
1856 if (ap
->a_command
== LFCNBMAPV
)
1857 error
= lfs_bmapv(l
, fsidp
, blkiov
, blkcnt
);
1858 else /* LFCNMARKV */
1859 error
= lfs_markv(l
, fsidp
, blkiov
, blkcnt
);
1861 for (i
= 0; i
< blkcnt
; i
++) {
1862 block_info_to_70(blkiov70
, &blkiov
[i
]);
1863 error
= copyout(blkiov70
, &blkvp70
.blkiov
[i
],
1870 mutex_enter(&lfs_lock
);
1871 if (--fs
->lfs_sleepers
== 0)
1872 wakeup(&fs
->lfs_sleepers
);
1873 mutex_exit(&lfs_lock
);
1874 lfs_free(fs
, blkiov
, LFS_NB_BLKIOV
);
1879 blkvp
= *(struct lfs_fcntl_markv
*)ap
->a_data
;
1881 blkcnt
= blkvp
.blkcnt
;
1882 if ((u_int
) blkcnt
> LFS_MARKV_MAXBLKCNT
)
1884 blkiov
= lfs_malloc(fs
, blkcnt
* sizeof(BLOCK_INFO
), LFS_NB_BLKIOV
);
1885 if ((error
= copyin(blkvp
.blkiov
, blkiov
,
1886 blkcnt
* sizeof(BLOCK_INFO
))) != 0) {
1887 lfs_free(fs
, blkiov
, LFS_NB_BLKIOV
);
1891 mutex_enter(&lfs_lock
);
1893 mutex_exit(&lfs_lock
);
1894 if (ap
->a_command
== LFCNBMAPV
)
1895 error
= lfs_bmapv(l
, fsidp
, blkiov
, blkcnt
);
1896 else /* LFCNMARKV */
1897 error
= lfs_markv(l
, fsidp
, blkiov
, blkcnt
);
1899 error
= copyout(blkiov
, blkvp
.blkiov
,
1900 blkcnt
* sizeof(BLOCK_INFO
));
1901 mutex_enter(&lfs_lock
);
1902 if (--fs
->lfs_sleepers
== 0)
1903 wakeup(&fs
->lfs_sleepers
);
1904 mutex_exit(&lfs_lock
);
1905 lfs_free(fs
, blkiov
, LFS_NB_BLKIOV
);
1910 * Flush dirops and write Ifile, allowing empty segments
1911 * to be immediately reclaimed.
1913 lfs_writer_enter(fs
, "pndirop");
1914 off
= lfs_sb_getoffset(fs
);
1915 lfs_seglock(fs
, SEGM_FORCE_CKP
| SEGM_CKP
);
1916 lfs_flush_dirops(fs
);
1917 LFS_CLEANERINFO(cip
, fs
, bp
);
1918 oclean
= lfs_ci_getclean(fs
, cip
);
1919 LFS_SYNC_CLEANERINFO(cip
, fs
, bp
, 1);
1920 lfs_segwrite(ap
->a_vp
->v_mount
, SEGM_FORCE_CKP
);
1921 fs
->lfs_sp
->seg_flags
|= SEGM_PROT
;
1923 lfs_writer_leave(fs
);
1926 LFS_CLEANERINFO(cip
, fs
, bp
);
1927 DLOG((DLOG_CLEAN
, "lfs_fcntl: reclaim wrote %" PRId64
1928 " blocks, cleaned %" PRId32
" segments (activesb %d)\n",
1929 lfs_sb_getoffset(fs
) - off
,
1930 lfs_ci_getclean(fs
, cip
) - oclean
,
1932 LFS_SYNC_CLEANERINFO(cip
, fs
, bp
, 0);
1940 case LFCNIFILEFH_COMPAT
:
1941 /* Return the filehandle of the Ifile */
1942 if ((error
= kauth_authorize_system(l
->l_cred
,
1943 KAUTH_SYSTEM_FILEHANDLE
, 0, NULL
, NULL
, NULL
)) != 0)
1945 fhp
= (struct fhandle
*)ap
->a_data
;
1946 fhp
->fh_fsid
= *fsidp
;
1947 fh_size
= 16; /* former VFS_MAXFIDSIZ */
1948 return lfs_vptofh(fs
->lfs_ivnode
, &(fhp
->fh_fid
), &fh_size
);
1950 case LFCNIFILEFH_COMPAT2
:
1952 /* Return the filehandle of the Ifile */
1953 fhp
= (struct fhandle
*)ap
->a_data
;
1954 fhp
->fh_fsid
= *fsidp
;
1955 fh_size
= sizeof(struct lfs_fhandle
) -
1956 offsetof(fhandle_t
, fh_fid
);
1957 return lfs_vptofh(fs
->lfs_ivnode
, &(fhp
->fh_fid
), &fh_size
);
1960 /* Move lfs_offset to the lowest-numbered segment */
1961 return lfs_rewind(fs
, *(int *)ap
->a_data
);
1964 /* Mark a segment SEGUSE_INVAL */
1965 LFS_SEGENTRY(sup
, fs
, *(int *)ap
->a_data
, bp
);
1966 if (sup
->su_nbytes
> 0) {
1968 lfs_unset_inval_all(fs
);
1971 sup
->su_flags
|= SEGUSE_INVAL
;
1972 VOP_BWRITE(bp
->b_vp
, bp
);
1976 /* Resize the filesystem */
1977 return lfs_resize_fs(fs
, *(int *)ap
->a_data
);
1980 case LFCNWRAPSTOP_COMPAT
:
1982 * Hold lfs_newseg at segment 0; if requested, sleep until
1983 * the filesystem wraps around. To support external agents
1984 * (dump, fsck-based regression test) that need to look at
1985 * a snapshot of the filesystem, without necessarily
1986 * requiring that all fs activity stops.
1988 if (fs
->lfs_stoplwp
== curlwp
)
1991 mutex_enter(&lfs_lock
);
1992 while (fs
->lfs_stoplwp
!= NULL
)
1993 cv_wait(&fs
->lfs_stopcv
, &lfs_lock
);
1994 fs
->lfs_stoplwp
= curlwp
;
1995 if (fs
->lfs_nowrap
== 0)
1996 log(LOG_NOTICE
, "%s: disabled log wrap\n",
1997 lfs_sb_getfsmnt(fs
));
1999 if (*(int *)ap
->a_data
== 1
2000 || ap
->a_command
== LFCNWRAPSTOP_COMPAT
) {
2001 log(LOG_NOTICE
, "LFCNSTOPWRAP waiting for log wrap\n");
2002 error
= mtsleep(&fs
->lfs_nowrap
, PCATCH
| PUSER
,
2003 "segwrap", 0, &lfs_lock
);
2004 log(LOG_NOTICE
, "LFCNSTOPWRAP done waiting\n");
2006 lfs_wrapgo(fs
, VTOI(ap
->a_vp
), 0);
2009 mutex_exit(&lfs_lock
);
2013 case LFCNWRAPGO_COMPAT
:
2015 * Having done its work, the agent wakes up the writer.
2016 * If the argument is 1, it sleeps until a new segment
2019 mutex_enter(&lfs_lock
);
2020 error
= lfs_wrapgo(fs
, VTOI(ap
->a_vp
),
2021 ap
->a_command
== LFCNWRAPGO_COMPAT
? 1 :
2022 *((int *)ap
->a_data
));
2023 mutex_exit(&lfs_lock
);
2027 if ((VTOI(ap
->a_vp
)->i_lfs_iflags
& LFSI_WRAPWAIT
))
2029 mutex_enter(&lfs_lock
);
2030 if (fs
->lfs_stoplwp
!= curlwp
) {
2031 mutex_exit(&lfs_lock
);
2034 if (fs
->lfs_nowrap
== 0) {
2035 mutex_exit(&lfs_lock
);
2038 fs
->lfs_wrappass
= 1;
2039 wakeup(&fs
->lfs_wrappass
);
2040 /* Wait for the log to wrap, if asked */
2041 if (*(int *)ap
->a_data
) {
2043 VTOI(ap
->a_vp
)->i_lfs_iflags
|= LFSI_WRAPWAIT
;
2044 log(LOG_NOTICE
, "LFCNPASS waiting for log wrap\n");
2045 error
= mtsleep(&fs
->lfs_nowrap
, PCATCH
| PUSER
,
2046 "segwrap", 0, &lfs_lock
);
2047 log(LOG_NOTICE
, "LFCNPASS done waiting\n");
2048 VTOI(ap
->a_vp
)->i_lfs_iflags
&= ~LFSI_WRAPWAIT
;
2051 mutex_exit(&lfs_lock
);
2054 case LFCNWRAPSTATUS
:
2055 mutex_enter(&lfs_lock
);
2056 *(int *)ap
->a_data
= fs
->lfs_wrapstatus
;
2057 mutex_exit(&lfs_lock
);
2061 return ulfs_fcntl(v
);
2067 * Return the last logical file offset that should be written for this file
2068 * if we're doing a write that ends at "size". If writing, we need to know
2069 * about sizes on disk, i.e. fragments if there are any; if reading, we need
2070 * to know about entire blocks.
2073 lfs_gop_size(struct vnode
*vp
, off_t size
, off_t
*eobp
, int flags
)
2075 struct inode
*ip
= VTOI(vp
);
2076 struct lfs
*fs
= ip
->i_lfs
;
2079 olbn
= lfs_lblkno(fs
, ip
->i_size
);
2080 nlbn
= lfs_lblkno(fs
, size
);
2081 if (!(flags
& GOP_SIZE_MEM
) && nlbn
< ULFS_NDADDR
&& olbn
<= nlbn
) {
2082 *eobp
= lfs_fragroundup(fs
, size
);
2084 *eobp
= lfs_blkroundup(fs
, size
);
2089 void lfs_dump_vop(void *);
2092 lfs_dump_vop(void *v
)
2094 struct vop_putpages_args
/* {
2101 struct inode
*ip
= VTOI(ap
->a_vp
);
2102 struct lfs
*fs
= ip
->i_lfs
;
2105 vfs_vnode_print(ap
->a_vp
, 0, printf
);
2107 lfs_dump_dinode(fs
, ip
->i_din
);
2114 struct vop_mmap_args
/* {
2115 const struct vnodeop_desc *a_desc;
2118 kauth_cred_t a_cred;
2121 if (VTOI(ap
->a_vp
)->i_number
== LFS_IFILE_INUM
)
2123 return ulfs_mmap(v
);
2127 lfs_openextattr(void *v
)
2129 struct vop_openextattr_args
/* {
2131 kauth_cred_t a_cred;
2134 struct inode
*ip
= VTOI(ap
->a_vp
);
2135 struct ulfsmount
*ump
= ip
->i_ump
;
2136 //struct lfs *fs = ip->i_lfs;
2138 /* Not supported for ULFS1 file systems. */
2139 if (ump
->um_fstype
== ULFS1
)
2140 return (EOPNOTSUPP
);
2142 /* XXX Not implemented for ULFS2 file systems. */
2143 return (EOPNOTSUPP
);
2147 lfs_closeextattr(void *v
)
2149 struct vop_closeextattr_args
/* {
2152 kauth_cred_t a_cred;
2155 struct inode
*ip
= VTOI(ap
->a_vp
);
2156 struct ulfsmount
*ump
= ip
->i_ump
;
2157 //struct lfs *fs = ip->i_lfs;
2159 /* Not supported for ULFS1 file systems. */
2160 if (ump
->um_fstype
== ULFS1
)
2161 return (EOPNOTSUPP
);
2163 /* XXX Not implemented for ULFS2 file systems. */
2164 return (EOPNOTSUPP
);
2168 lfs_getextattr(void *v
)
2170 struct vop_getextattr_args
/* {
2172 int a_attrnamespace;
2176 kauth_cred_t a_cred;
2179 struct vnode
*vp
= ap
->a_vp
;
2180 struct inode
*ip
= VTOI(vp
);
2181 struct ulfsmount
*ump
= ip
->i_ump
;
2182 //struct lfs *fs = ip->i_lfs;
2185 if (ump
->um_fstype
== ULFS1
) {
2187 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
2188 error
= ulfs_getextattr(ap
);
2189 fstrans_done(vp
->v_mount
);
2196 /* XXX Not implemented for ULFS2 file systems. */
2197 return (EOPNOTSUPP
);
2201 lfs_setextattr(void *v
)
2203 struct vop_setextattr_args
/* {
2205 int a_attrnamespace;
2208 kauth_cred_t a_cred;
2211 struct vnode
*vp
= ap
->a_vp
;
2212 struct inode
*ip
= VTOI(vp
);
2213 struct ulfsmount
*ump
= ip
->i_ump
;
2214 //struct lfs *fs = ip->i_lfs;
2217 if (ump
->um_fstype
== ULFS1
) {
2219 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
2220 error
= ulfs_setextattr(ap
);
2221 fstrans_done(vp
->v_mount
);
2228 /* XXX Not implemented for ULFS2 file systems. */
2229 return (EOPNOTSUPP
);
2233 lfs_listextattr(void *v
)
2235 struct vop_listextattr_args
/* {
2237 int a_attrnamespace;
2240 kauth_cred_t a_cred;
2243 struct vnode
*vp
= ap
->a_vp
;
2244 struct inode
*ip
= VTOI(vp
);
2245 struct ulfsmount
*ump
= ip
->i_ump
;
2246 //struct lfs *fs = ip->i_lfs;
2249 if (ump
->um_fstype
== ULFS1
) {
2251 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
2252 error
= ulfs_listextattr(ap
);
2253 fstrans_done(vp
->v_mount
);
2260 /* XXX Not implemented for ULFS2 file systems. */
2261 return (EOPNOTSUPP
);
2265 lfs_deleteextattr(void *v
)
2267 struct vop_deleteextattr_args
/* {
2269 int a_attrnamespace;
2270 kauth_cred_t a_cred;
2273 struct vnode
*vp
= ap
->a_vp
;
2274 struct inode
*ip
= VTOI(vp
);
2275 struct ulfsmount
*ump
= ip
->i_ump
;
2276 //struct fs *fs = ip->i_lfs;
2279 if (ump
->um_fstype
== ULFS1
) {
2281 fstrans_start(vp
->v_mount
, FSTRANS_SHARED
);
2282 error
= ulfs_deleteextattr(ap
);
2283 fstrans_done(vp
->v_mount
);
2290 /* XXX Not implemented for ULFS2 file systems. */
2291 return (EOPNOTSUPP
);