/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/policy.h>

#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_snap.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/dirent.h>		/* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>

#include <sys/filio.h>		/* _FIOIO */

#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>

#include <fs/fs_subr.h>

#include <sys/fs/decomp.h>
static struct instats ins;

static	int ufs_getpage_ra(struct vnode *, u_offset_t, struct seg *, caddr_t);
static	int ufs_getpage_miss(struct vnode *, u_offset_t, size_t, struct seg *,
		caddr_t, struct page **, size_t, enum seg_rw, int);
static	int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
static	int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
		caller_context_t *);
static	int ufs_read(struct vnode *, struct uio *, int, struct cred *,
		struct caller_context *);
static	int ufs_write(struct vnode *, struct uio *, int, struct cred *,
		struct caller_context *);
static	int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
		int *, caller_context_t *);
static	int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
		caller_context_t *);
static	int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
		caller_context_t *);
static	int ufs_access(struct vnode *, int, int, struct cred *,
		caller_context_t *);
static	int ufs_lookup(struct vnode *, char *, struct vnode **,
		struct pathname *, int, struct vnode *, struct cred *,
		caller_context_t *, int *, pathname_t *);
static	int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
		int, struct vnode **, struct cred *, int,
		caller_context_t *, vsecattr_t *);
static	int ufs_remove(struct vnode *, char *, struct cred *,
		caller_context_t *, int);
static	int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
		caller_context_t *, int);
static	int ufs_rename(struct vnode *, char *, struct vnode *, char *,
		struct cred *, caller_context_t *, int);
static	int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
		struct cred *, caller_context_t *, int, vsecattr_t *);
static	int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
		caller_context_t *, int);
static	int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
		caller_context_t *, int);
static	int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
		struct cred *, caller_context_t *, int);
static	int ufs_readlink(struct vnode *, struct uio *, struct cred *,
		caller_context_t *);
static	int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
static	void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
static	int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
static	int ufs_rwlock(struct vnode *, int, caller_context_t *);
static	void ufs_rwunlock(struct vnode *, int, caller_context_t *);
static	int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
static	int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
		struct flk_callback *, struct cred *,
		caller_context_t *);
static	int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
		cred_t *, caller_context_t *);
static	int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
		struct page **, size_t, struct seg *, caddr_t,
		enum seg_rw, struct cred *, caller_context_t *);
static	int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
		caller_context_t *);
static	int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
static	int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
		uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static	int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
		uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static	int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t, size_t,
		uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
static	int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
		caller_context_t *);
static	int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
		caller_context_t *);
static	int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
		caller_context_t *);
static	int ufs_pageio(struct vnode *, struct page *, u_offset_t, size_t, int,
		struct cred *, caller_context_t *);
static	int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
static	daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
		daddr32_t *, int, int);
static	int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
		caller_context_t *);
static	int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
		caller_context_t *);
static	int ufs_priv_access(void *, int, struct cred *);
static	int ufs_eventlookup(struct vnode *, char *, struct cred *,
		struct vnode **);
extern	int as_map_locked(struct as *, caddr_t, size_t, int ((*)()), void *);
/*
 * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 *
 * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 */

struct vnodeops *ufs_vnodeops;
/* NOTE: "not blkd" below means that the operation isn't blocked by lockfs */
const fs_operation_def_t ufs_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = ufs_open },	/* not blkd */
	VOPNAME_CLOSE,		{ .vop_close = ufs_close },	/* not blkd */
	VOPNAME_READ,		{ .vop_read = ufs_read },
	VOPNAME_WRITE,		{ .vop_write = ufs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = ufs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = ufs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = ufs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = ufs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = ufs_lookup },
	VOPNAME_CREATE,		{ .vop_create = ufs_create },
	VOPNAME_REMOVE,		{ .vop_remove = ufs_remove },
	VOPNAME_LINK,		{ .vop_link = ufs_link },
	VOPNAME_RENAME,		{ .vop_rename = ufs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = ufs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = ufs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = ufs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = ufs_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = ufs_readlink },
	VOPNAME_FSYNC,		{ .vop_fsync = ufs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = ufs_inactive },	/* not blkd */
	VOPNAME_FID,		{ .vop_fid = ufs_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = ufs_rwlock },	/* not blkd */
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = ufs_rwunlock },	/* not blkd */
	VOPNAME_SEEK,		{ .vop_seek = ufs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = ufs_frlock },
	VOPNAME_SPACE,		{ .vop_space = ufs_space },
	VOPNAME_GETPAGE,	{ .vop_getpage = ufs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = ufs_putpage },
	VOPNAME_MAP,		{ .vop_map = ufs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = ufs_addmap },	/* not blkd */
	VOPNAME_DELMAP,		{ .vop_delmap = ufs_delmap },	/* not blkd */
	VOPNAME_POLL,		{ .vop_poll = ufs_poll },	/* not blkd */
	VOPNAME_DUMP,		{ .vop_dump = ufs_dump },
	VOPNAME_PATHCONF,	{ .vop_pathconf = ufs_l_pathconf },
	VOPNAME_PAGEIO,		{ .vop_pageio = ufs_pageio },
	VOPNAME_DUMPCTL,	{ .vop_dumpctl = ufs_dumpctl },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = ufs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = ufs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};
#define	MAX_BACKFILE_COUNT	9999

/*
 * Created by ufs_dumpctl() to store a file's disk block info into memory.
 * Used by ufs_dump() to dump data to disk directly.
 */
struct dump {
	struct inode	*ip;		/* the file we contain */
	daddr_t		fsbs;		/* number of blocks stored */
	struct timeval32 time;		/* time stamp for the struct */
	daddr32_t	dblk[1];	/* place holder for block info */
};

static struct dump *dump_info = NULL;
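
/*
 * Illustrative sketch (not from the original source): dblk[1] above is the
 * usual C trailing-array idiom, so a struct dump that holds "nblks" block
 * numbers would be allocated with room for nblks - 1 additional daddr32_t
 * entries.  The helper below is hypothetical and only shows the sizing
 * arithmetic; it is not compiled.
 */
#if 0
static struct dump *
dump_alloc_example(int nblks)
{
	struct dump *dp;

	/* one daddr32_t is already inside struct dump */
	dp = kmem_alloc(sizeof (struct dump) +
	    (nblks - 1) * sizeof (daddr32_t), KM_SLEEP);
	dp->fsbs = nblks;
	return (dp);
}
#endif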
/*
 * Previously there was no special action required for ordinary files.
 * (Devices are handled through the device file system.)
 * Now we support Large Files and Large File API requires open to
 * fail if file is large.
 * We could take care to prevent data corruption
 * by doing an atomic check of size and truncate if file is opened with
 * FTRUNC flag set but traditionally this is being done by the vfs/vnode
 * layers. So taking care of truncation here is a change in the existing
 * semantics of VOP_OPEN and therefore we chose not to implement any thing
 * here. The check for the size of the file > 2GB is being done at the
 * vfs layer in routine vn_open().
 */
ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
	struct cred *cr, caller_context_t *ct)
	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);

	/*
	 * Push partially filled cluster at last close.
	 * ``last close'' is approximated because the dnlc
	 * may have a hold on the vnode.
	 * Checking for VBAD here will also act as a forced umount check.
	 */
	if (vp->v_count <= 2 && vp->v_type != VBAD) {
		struct inode *ip = VTOI(vp);

		if (ip->i_delaylen) {
			ins.in_poc.value.ul++;
			(void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
			    B_ASYNC | B_FREE, cr);
ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
	struct caller_context *ct)
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp = NULL;

	ASSERT(RW_READ_HELD(&ip->i_rwlock));

	/*
	 * Mandatory locking needs to be done before ufs_lockfs_begin()
	 * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
	 */
	if (MANDLOCK(vp, ip->i_mode)) {
		/*
		 * ufs_getattr ends up being called by chklock
		 */
		error = chklock(vp, FREAD, uiop->uio_loffset,
		    uiop->uio_resid, uiop->uio_fmode, ct);

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);

	/*
	 * In the case that a directory is opened for reading as a file
	 * (eg "cat .") with the O_RSYNC, O_SYNC and O_DSYNC flags set.
	 * The locking order had to be changed to avoid a deadlock with
	 * an update taking place on that directory at the same time.
	 */
	if ((ip->i_mode & IFMT) == IFDIR) {
		rw_enter(&ip->i_contents, RW_READER);
		error = rdip(ip, uiop, ioflag, cr);
		rw_exit(&ip->i_contents);

		if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
		    TRANS_ISTRANS(ufsvfsp)) {
			rw_exit(&ip->i_rwlock);
			TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
			TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
			rw_enter(&ip->i_rwlock, RW_READER);

	/*
	 * Only transact reads to files opened for sync-read and
	 * sync-write on a file system that is not write locked.
	 *
	 * The ``not write locked'' check prevents problems with
	 * enabling/disabling logging on a busy file system.  E.g.,
	 * logging exists at the beginning of the read but does not
	 */
	if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
	    TRANS_ISTRANS(ufsvfsp)) {
		TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,

	rw_enter(&ip->i_contents, RW_READER);
	error = rdip(ip, uiop, ioflag, cr);
	rw_exit(&ip->i_contents);

		TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
extern	int	ufs_HW;		/* high water mark */
extern	int	ufs_LW;		/* low water mark */
int	ufs_WRITES = 1;		/* XXX - enable/disable */
int	ufs_throttles = 0;	/* throttling count */
int	ufs_allow_shared_writes = 1;	/* directio shared writes */

ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
	/*
	 * If the FDSYNC flag is set then ignore the global
	 * ufs_allow_shared_writes in this case.
	 */
	shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;

	/*
	 * Filter to determine if this request is suitable as a
	 * concurrent rewrite. This write must not allocate blocks
	 * by extending the file or filling in holes. No use trying
	 * through FSYNC descriptors as the inode will be synchronously
	 * updated after the write. The uio structure has not yet been
	 * checked for sanity, so assume nothing.
	 */
	return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
	    (uiop->uio_loffset >= (offset_t)0) &&
	    (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
	    ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
	    !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
	    shared_write);
ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	int error, resv, resid = 0;
	long start_resid = uiop->uio_resid;

	ASSERT(RW_LOCK_HELD(&ip->i_rwlock));
451 * Mandatory locking needs to be done before ufs_lockfs_begin()
452 * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
453 * Check for forced unmounts normally done in ufs_lockfs_begin().
455 if ((ufsvfsp
= ip
->i_ufsvfs
) == NULL
) {
459 if (MANDLOCK(vp
, ip
->i_mode
)) {
461 ASSERT(RW_WRITE_HELD(&ip
->i_rwlock
));
464 * ufs_getattr ends up being called by chklock
466 error
= chklock(vp
, FWRITE
, uiop
->uio_loffset
,
467 uiop
->uio_resid
, uiop
->uio_fmode
, ct
);
472 /* i_rwlock can change in chklock */
473 exclusive
= rw_write_held(&ip
->i_rwlock
);
474 rewriteflg
= ufs_check_rewrite(ip
, uiop
, ioflag
);
477 * Check for fast-path special case of directio re-writes.
479 if ((ip
->i_flag
& IDIRECTIO
|| ufsvfsp
->vfs_forcedirectio
) &&
480 !exclusive
&& rewriteflg
) {
482 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_WRITE_MASK
);
486 rw_enter(&ip
->i_contents
, RW_READER
);
487 error
= ufs_directio_write(ip
, uiop
, ioflag
, 1, cr
,
489 if (directio_status
== DIRECTIO_SUCCESS
) {
492 if (start_resid
!= uiop
->uio_resid
)
495 * Special treatment of access times for re-writes.
496 * If IMOD is not already set, then convert it
497 * to IMODACC for this operation. This defers
498 * entering a delta into the log until the inode
499 * is flushed. This mimics what is done for read
500 * operations and inode access time.
502 mutex_enter(&ip
->i_tlock
);
503 i_flag_save
= ip
->i_flag
;
504 ip
->i_flag
|= IUPD
| ICHG
;
507 if ((i_flag_save
& IMOD
) == 0) {
509 ip
->i_flag
|= IMODACC
;
511 mutex_exit(&ip
->i_tlock
);
512 rw_exit(&ip
->i_contents
);
517 rw_exit(&ip
->i_contents
);
522 if (!exclusive
&& !rw_tryupgrade(&ip
->i_rwlock
)) {
523 rw_exit(&ip
->i_rwlock
);
524 rw_enter(&ip
->i_rwlock
, RW_WRITER
);
526 * Mandatory locking could have been enabled
527 * after dropping the i_rwlock.
529 if (MANDLOCK(vp
, ip
->i_mode
))
533 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_WRITE_MASK
);
538 * Amount of log space needed for this write
540 if (!rewriteflg
|| !(ioflag
& FDSYNC
))
541 TRANS_WRITE_RESV(ip
, uiop
, ulp
, &resv
, &resid
);
546 if (ufs_WRITES
&& (ip
->i_writes
> ufs_HW
)) {
547 mutex_enter(&ip
->i_tlock
);
548 while (ip
->i_writes
> ufs_HW
) {
550 cv_wait(&ip
->i_wrcv
, &ip
->i_tlock
);
552 mutex_exit(&ip
->i_tlock
);
558 * If the write is a rewrite there is no need to open a transaction
559 * if the FDSYNC flag is set and not the FSYNC. In this case just
560 * set the IMODACC flag to modify do the update at a later time
561 * thus avoiding the overhead of the logging transaction that is
564 if (ioflag
& (FSYNC
|FDSYNC
)) {
569 rw_enter(&ip
->i_contents
, RW_READER
);
570 mutex_enter(&ip
->i_tlock
);
571 i_flag_save
= ip
->i_flag
;
572 ip
->i_flag
|= IUPD
| ICHG
;
575 if ((i_flag_save
& IMOD
) == 0) {
577 ip
->i_flag
|= IMODACC
;
579 mutex_exit(&ip
->i_tlock
);
580 rw_exit(&ip
->i_contents
);
583 TRANS_BEGIN_SYNC(ufsvfsp
, TOP_WRITE_SYNC
, resv
,
590 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_WRITE
, resv
);
596 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
597 rw_enter(&ip
->i_contents
, RW_WRITER
);
598 if ((ioflag
& FAPPEND
) != 0 && (ip
->i_mode
& IFMT
) == IFREG
) {
600 * In append mode start at end of file.
602 uiop
->uio_loffset
= ip
->i_size
;
606 * Mild optimisation, don't call ufs_trans_write() unless we have to
607 * Also, suppress file system full messages if we will retry.
610 ip
->i_flag
|= IQUIET
;
612 TRANS_WRITE(ip
, uiop
, ioflag
, error
, ulp
, cr
, resv
, resid
);
614 error
= wrip(ip
, uiop
, ioflag
, cr
);
616 ip
->i_flag
&= ~IQUIET
;
618 rw_exit(&ip
->i_contents
);
619 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
625 if (ioflag
& (FSYNC
|FDSYNC
)) {
629 TRANS_END_SYNC(ufsvfsp
, terr
, TOP_WRITE_SYNC
,
635 TRANS_END_ASYNC(ufsvfsp
, TOP_WRITE
, resv
);
640 if ((error
== ENOSPC
) && retry
&& TRANS_ISTRANS(ufsvfsp
)) {
642 * Any blocks tied up in pending deletes?
644 ufs_delete_drain_wait(ufsvfsp
, 1);
649 if (error
== ENOSPC
&& (start_resid
!= uiop
->uio_resid
))
656 * Don't cache write blocks to files with the sticky bit set.
657 * Used to keep swap files from blowing the page cache on a server.
662 * Free behind hacks. The pager is busted.
663 * XXX - need to pass the information down to writedone() in a flag like B_SEQ
664 * or B_FREE_IF_TIGHT_ON_MEMORY.
668 u_offset_t smallfile64
= 32 * 1024;
671 * While we should, in most cases, cache the pages for write, we
672 * may also want to cache the pages for read as long as they are
673 * frequently re-usable.
675 * If cache_read_ahead = 1, the pages for read will go to the tail
676 * of the cache list when they are released, otherwise go to the head.
678 int cache_read_ahead
= 0;
681 * Freebehind exists so that as we read large files sequentially we
682 * don't consume most of memory with pages from a few files. It takes
683 * longer to re-read from disk multiple small files as it does reading
684 * one large one sequentially. As system memory grows customers need
685 * to retain bigger chunks of files in memory. The advent of the
686 * cachelist opens up of the possibility freeing pages to the head or
689 * Not freeing a page is a bet that the page will be read again before
690 * it's segmap slot is needed for something else. If we loose the bet,
691 * it means some other thread is burdened with the page free we did
692 * not do. If we win we save a free and reclaim.
694 * Freeing it at the tail vs the head of cachelist is a bet that the
695 * page will survive until the next read. It's also saying that this
696 * page is more likely to be re-used than a page freed some time ago
697 * and never reclaimed.
699 * Freebehind maintains a range of file offset [smallfile1; smallfile2]
701 * 0 < offset < smallfile1 : pages are not freed.
702 * smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
703 * smallfile2 < offset : pages freed to head of cachelist.
705 * The range is computed at most once per second and depends on
706 * freemem and ncpus_online. Both parameters are bounded to be
707 * >= smallfile && >= smallfile64.
709 * smallfile1 = (free memory / ncpu) / 1000
710 * smallfile2 = (free memory / ncpu) / 10
712 * A few examples values:
714 * Free Mem (in Bytes) [smallfile1; smallfile2] [smallfile1; smallfile2]
715 * ncpus_online = 4 ncpus_online = 64
716 * ------------------ ----------------------- -----------------------
717 * 1G [256K; 25M] [32K; 1.5M]
718 * 10G [2.5M; 250M] [156K; 15M]
719 * 100G [25M; 2.5G] [1.5M; 150M]
723 #define SMALLFILE1_D 1000
724 #define SMALLFILE2_D 10
725 static u_offset_t smallfile1
= 32 * 1024;
726 static u_offset_t smallfile2
= 32 * 1024;
727 static clock_t smallfile_update
= 0; /* lbolt value of when to recompute */
728 uint_t smallfile1_d
= SMALLFILE1_D
;
729 uint_t smallfile2_d
= SMALLFILE2_D
;
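
/*
 * Worked example (illustrative, not part of the original source): with 1G
 * of free memory and ncpus_online = 4,
 *
 *	percpufreeb = 1G / 4		= 256M
 *	smallfile1  = 256M / 1000	~= 256K
 *	smallfile2  = 256M / 10		= 25.6M
 *
 * which matches the first row of the table above.  The hypothetical helper
 * below only restates the core arithmetic used when the range is recomputed
 * (see rdip()); the additional lower bound against the "smallfile" tunable
 * is omitted and the block is not compiled.
 */
#if 0
static void
smallfile_range_example(void)
{
	uint64_t percpufreeb = ptob((uint64_t)freemem) / ncpus_online;

	smallfile1 = MAX(percpufreeb / smallfile1_d, smallfile64);
	smallfile2 = MAX(smallfile1, percpufreeb / smallfile2_d);
}
#endif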
/*
 * wrip does the real work of write requests for ufs.
 */
wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
	rlim64_t limit = uio->uio_llimit;
	u_offset_t old_i_size;
	struct ufsvfs *ufsvfsp;
	long start_resid = uio->uio_resid;	/* save starting resid */
	long premove_resid;			/* resid before uiomove() */
	int iupdat_flag, directio_status;
	int error, pagecreate;
	int do_dqrwlock;			/* drop/reacquire vfs_dqrwlock */

	/*
	 * ip->i_size is incremented before the uiomove
	 * is done on a write.  If the move fails (bad user
	 * address) reset ip->i_size.
	 * The better way would be to increment ip->i_size
	 * only if the uiomove succeeds.
	 */
	int i_size_changed = 0;
	int i_seq_needed = 0;
769 * check for forced unmount - should not happen as
770 * the request passed the lockfs checks.
772 if ((ufsvfsp
= ip
->i_ufsvfs
) == NULL
)
777 ASSERT(RW_WRITE_HELD(&ip
->i_contents
));
779 /* check for valid filetype */
780 type
= ip
->i_mode
& IFMT
;
781 if ((type
!= IFREG
) && (type
!= IFDIR
) && (type
!= IFATTRDIR
) &&
782 (type
!= IFLNK
) && (type
!= IFSHAD
)) {
787 * the actual limit of UFS file size
790 if (limit
== RLIM64_INFINITY
|| limit
> MAXOFFSET_T
)
793 if (uio
->uio_loffset
>= limit
) {
794 proc_t
*p
= ttoproc(curthread
);
796 mutex_enter(&p
->p_lock
);
797 (void) rctl_action(rctlproc_legacy
[RLIMIT_FSIZE
], p
->p_rctls
,
798 p
, RCA_UNSAFE_SIGINFO
);
799 mutex_exit(&p
->p_lock
);
804 * if largefiles are disallowed, the limit is
805 * the pre-largefiles value of 2GB
807 if (ufsvfsp
->vfs_lfflags
& UFS_LARGEFILES
)
808 limit
= MIN(UFS_MAXOFFSET_T
, limit
);
810 limit
= MIN(MAXOFF32_T
, limit
);
812 if (uio
->uio_loffset
< (offset_t
)0) {
815 if (uio
->uio_resid
== 0) {
819 if (uio
->uio_loffset
>= limit
)
822 ip
->i_flag
|= INOACC
; /* don't update ref time in getpage */
824 if (ioflag
& (FSYNC
|FDSYNC
)) {
831 if (ip
->i_flag
& IDIRECTIO
|| ufsvfsp
->vfs_forcedirectio
) {
832 uio
->uio_llimit
= limit
;
833 error
= ufs_directio_write(ip
, uio
, ioflag
, 0, cr
,
836 * If ufs_directio wrote to the file or set the flags,
837 * we need to update i_seq, but it may be deferred.
839 if (start_resid
!= uio
->uio_resid
||
840 (ip
->i_flag
& (ICHG
|IUPD
))) {
844 if (directio_status
== DIRECTIO_SUCCESS
)
849 * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
851 * o shadow inodes: vfs_dqrwlock is not held at all
852 * o quota updates: vfs_dqrwlock is read or write held
853 * o other updates: vfs_dqrwlock is read held
855 * The first case is the only one where we do not hold
856 * vfs_dqrwlock at all while entering wrip().
857 * We must make sure not to downgrade/drop vfs_dqrwlock if we
858 * have it as writer, i.e. if we are updating the quota inode.
859 * There is no potential deadlock scenario in this case as
860 * ufs_getpage() takes care of this and avoids reacquiring
861 * vfs_dqrwlock in that case.
863 * This check is done here since the above conditions do not change
864 * and we possibly loop below, so save a few cycles.
866 if ((type
== IFSHAD
) ||
867 (rw_owner(&ufsvfsp
->vfs_dqrwlock
) == curthread
)) {
874 * Large Files: We cast MAXBMASK to offset_t
875 * inorder to mask out the higher bits. Since offset_t
876 * is a signed value, the high order bit set in MAXBMASK
877 * value makes it do the right thing by having all bits 1
878 * in the higher word. May be removed for _SOLARIS64_.
883 u_offset_t uoff
= uio
->uio_loffset
;
884 off
= uoff
& (offset_t
)MAXBMASK
;
885 mapon
= (int)(uoff
& (offset_t
)MAXBOFFSET
);
886 on
= (int)blkoff(fs
, uoff
);
887 n
= (int)MIN(fs
->fs_bsize
- on
, uio
->uio_resid
);
890 if (type
== IFREG
&& uoff
+ n
>= limit
) {
896 * since uoff + n >= limit,
897 * therefore n >= limit - uoff, and n is an int
898 * so it is safe to cast it to an int
900 n
= (int)(limit
- (rlim64_t
)uoff
);
902 if (uoff
+ n
> ip
->i_size
) {
904 * We are extending the length of the file.
905 * bmap is used so that we are sure that
906 * if we need to allocate new blocks, that it
907 * is done here before we up the file size.
909 error
= bmap_write(ip
, uoff
, (int)(on
+ n
),
910 mapon
== 0, NULL
, cr
);
912 * bmap_write never drops i_contents so if
913 * the flags are set it changed the file.
915 if (ip
->i_flag
& (ICHG
|IUPD
)) {
922 * There is a window of vulnerability here.
923 * The sequence of operations: allocate file
924 * system blocks, uiomove the data into pages,
925 * and then update the size of the file in the
926 * inode, must happen atomically. However, due
927 * to current locking constraints, this can not
930 ASSERT(ip
->i_writer
== NULL
);
931 ip
->i_writer
= curthread
;
934 * If we are writing from the beginning of
935 * the mapping, we can just create the
936 * pages without having to read them.
938 pagecreate
= (mapon
== 0);
939 } else if (n
== MAXBSIZE
) {
941 * Going to do a whole mappings worth,
942 * so we can just create the pages w/o
943 * having to read them in. But before
944 * we do that, we need to make sure any
945 * needed blocks are allocated first.
947 iblocks
= ip
->i_blocks
;
948 error
= bmap_write(ip
, uoff
, (int)(on
+ n
),
949 BI_ALLOC_ONLY
, NULL
, cr
);
951 * bmap_write never drops i_contents so if
952 * the flags are set it changed the file.
954 if (ip
->i_flag
& (ICHG
|IUPD
)) {
962 * check if the new created page needed the
963 * allocation of new disk blocks.
965 if (iblocks
== ip
->i_blocks
)
966 new_iblocks
= 0; /* no new blocks allocated */
970 * In sync mode flush the indirect blocks which
971 * may have been allocated and not written on
972 * disk. In above cases bmap_write will allocate
975 if (ioflag
& (FSYNC
|FDSYNC
)) {
976 error
= ufs_indirblk_sync(ip
, uoff
);
983 * At this point we can enter ufs_getpage() in one
985 * 1) segmap_getmapflt() calls ufs_getpage() when the
986 * forcefault parameter is true (pagecreate == 0)
987 * 2) uiomove() causes a page fault.
989 * We have to drop the contents lock to prevent the VM
990 * system from trying to reacquire it in ufs_getpage()
991 * should the uiomove cause a pagefault.
993 * We have to drop the reader vfs_dqrwlock here as well.
995 rw_exit(&ip
->i_contents
);
997 ASSERT(RW_LOCK_HELD(&ufsvfsp
->vfs_dqrwlock
));
998 ASSERT(!(RW_WRITE_HELD(&ufsvfsp
->vfs_dqrwlock
)));
999 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
1003 premove_resid
= uio
->uio_resid
;
1006 * Touch the page and fault it in if it is not in core
1007 * before segmap_getmapflt or vpm_data_copy can lock it.
1008 * This is to avoid the deadlock if the buffer is mapped
1009 * to the same file through mmap which we want to write.
1011 uio_prefaultpages((long)n
, uio
);
1015 * Copy data. If new pages are created, part of
1016 * the page that is not written will be initizliazed
1019 error
= vpm_data_copy(vp
, (off
+ mapon
), (uint_t
)n
,
1020 uio
, !pagecreate
, &newpage
, 0, S_WRITE
);
1023 base
= segmap_getmapflt(segkmap
, vp
, (off
+ mapon
),
1024 (uint_t
)n
, !pagecreate
, S_WRITE
);
1027 * segmap_pagecreate() returns 1 if it calls
1028 * page_create_va() to allocate any pages.
1032 newpage
= segmap_pagecreate(segkmap
, base
,
1035 error
= uiomove(base
+ mapon
, (long)n
, UIO_WRITE
, uio
);
1039 * If "newpage" is set, then a new page was created and it
1040 * does not contain valid data, so it needs to be initialized
1042 * Otherwise the page contains old data, which was overwritten
1043 * partially or as a whole in uiomove.
1044 * If there is only one iovec structure within uio, then
1045 * on error uiomove will not be able to update uio->uio_loffset
1046 * and we would zero the whole page here!
1048 * If uiomove fails because of an error, the old valid data
1049 * is kept instead of filling the rest of the page with zero's.
1051 if (!vpm_enable
&& newpage
&&
1052 uio
->uio_loffset
< roundup(off
+ mapon
+ n
, PAGESIZE
)) {
1054 * We created pages w/o initializing them completely,
1055 * thus we need to zero the part that wasn't set up.
1056 * This happens on most EOF write cases and if
1057 * we had some sort of error during the uiomove.
1061 nmoved
= (int)(uio
->uio_loffset
- (off
+ mapon
));
1062 ASSERT(nmoved
>= 0 && nmoved
<= n
);
1063 nzero
= roundup(on
+ n
, PAGESIZE
) - nmoved
;
1064 ASSERT(nzero
> 0 && mapon
+ nmoved
+ nzero
<= MAXBSIZE
);
1065 (void) kzero(base
+ mapon
+ nmoved
, (uint_t
)nzero
);
1069 * Unlock the pages allocated by page_create_va()
1070 * in segmap_pagecreate()
1072 if (!vpm_enable
&& newpage
)
1073 segmap_pageunlock(segkmap
, base
, (size_t)n
, S_WRITE
);
1076 * If the size of the file changed, then update the
1077 * size field in the inode now. This can't be done
1078 * before the call to segmap_pageunlock or there is
1079 * a potential deadlock with callers to ufs_putpage().
1080 * They will be holding i_contents and trying to lock
1081 * a page, while this thread is holding a page locked
1082 * and trying to acquire i_contents.
1084 if (i_size_changed
) {
1085 rw_enter(&ip
->i_contents
, RW_WRITER
);
1086 old_i_size
= ip
->i_size
;
1087 UFS_SET_ISIZE(uoff
+ n
, ip
);
1088 TRANS_INODE(ufsvfsp
, ip
);
1090 * file has grown larger than 2GB. Set flag
1091 * in superblock to indicate this, if it
1092 * is not already set.
1094 if ((ip
->i_size
> MAXOFF32_T
) &&
1095 !(fs
->fs_flags
& FSLARGEFILES
)) {
1096 ASSERT(ufsvfsp
->vfs_lfflags
& UFS_LARGEFILES
);
1097 mutex_enter(&ufsvfsp
->vfs_lock
);
1098 fs
->fs_flags
|= FSLARGEFILES
;
1099 ufs_sbwrite(ufsvfsp
);
1100 mutex_exit(&ufsvfsp
->vfs_lock
);
1102 mutex_enter(&ip
->i_tlock
);
1103 ip
->i_writer
= NULL
;
1104 cv_broadcast(&ip
->i_wrcv
);
1105 mutex_exit(&ip
->i_tlock
);
1106 rw_exit(&ip
->i_contents
);
1111 * If we failed on a write, we may have already
1112 * allocated file blocks as well as pages. It's
1113 * hard to undo the block allocation, but we must
1114 * be sure to invalidate any pages that may have
1117 * If the page was created without initialization
1118 * then we must check if it should be possible
1119 * to destroy the new page and to keep the old data
1122 * It is possible to destroy the page without
1123 * having to write back its contents only when
1124 * - the size of the file keeps unchanged
1125 * - bmap_write() did not allocate new disk blocks
1126 * it is possible to create big files using "seek" and
1127 * write to the end of the file. A "write" to a
1128 * position before the end of the file would not
1129 * change the size of the file but it would allocate
1131 * - uiomove intended to overwrite the whole page.
1132 * - a new page was created (newpage == 1).
1135 if (i_size_changed
== 0 && new_iblocks
== 0 &&
1138 /* unwind what uiomove eventually last did */
1139 uio
->uio_resid
= premove_resid
;
1142 * destroy the page, do not write ambiguous
1148 * write the page back to the disk, if dirty,
1149 * and remove the page from the cache.
1158 (void) vpm_sync_pages(vp
, off
, n
, flags
);
1160 (void) segmap_release(segkmap
, base
, flags
);
1165 * Force write back for synchronous write cases.
1167 if ((ioflag
& (FSYNC
|FDSYNC
)) || type
== IFDIR
) {
1169 * If the sticky bit is set but the
1170 * execute bit is not set, we do a
1171 * synchronous write back and free
1172 * the page when done. We set up swap
1173 * files to be handled this way to
1174 * prevent servers from keeping around
1175 * the client's swap pages too long.
1176 * XXX - there ought to be a better way.
1178 if (IS_SWAPVP(vp
)) {
1179 flags
= SM_WRITE
| SM_FREE
|
1185 } else if (n
+ on
== MAXBSIZE
|| IS_SWAPVP(vp
)) {
1187 * Have written a whole block.
1188 * Start an asynchronous write and
1189 * mark the buffer to indicate that
1190 * it won't be needed again soon.
1192 flags
= SM_WRITE
| SM_ASYNC
| SM_DONTNEED
;
1198 error
= vpm_sync_pages(vp
, off
, n
, flags
);
1200 error
= segmap_release(segkmap
, base
, flags
);
1203 * If the operation failed and is synchronous,
1204 * then we need to unwind what uiomove() last
1205 * did so we can potentially return an error to
1206 * the caller. If this write operation was
1207 * done in two pieces and the first succeeded,
1208 * then we won't return an error for the second
1209 * piece that failed. However, we only want to
1210 * return a resid value that reflects what was
1213 * Failures for non-synchronous operations can
1214 * be ignored since the page subsystem will
1215 * retry the operation until it succeeds or the
1216 * file system is unmounted.
1219 if ((ioflag
& (FSYNC
| FDSYNC
)) ||
1221 uio
->uio_resid
= premove_resid
;
1229 * Re-acquire contents lock.
1230 * If it was dropped, reacquire reader vfs_dqrwlock as well.
1233 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
1234 rw_enter(&ip
->i_contents
, RW_WRITER
);
1237 * If the uiomove() failed or if a synchronous
1238 * page push failed, fix up i_size.
1241 if (i_size_changed
) {
1243 * The uiomove failed, and we
1244 * allocated blocks,so get rid
1247 (void) ufs_itrunc(ip
, old_i_size
, 0, cr
);
1251 * XXX - Can this be out of the loop?
1253 ip
->i_flag
|= IUPD
| ICHG
;
1255 * Only do one increase of i_seq for multiple
1256 * pieces. Because we drop locks, record
1257 * the fact that we changed the timestamp and
1258 * are deferring the increase in case another thread
1259 * pushes our timestamp update.
1264 ip
->i_flag
|= IATTCHG
;
1265 if ((ip
->i_mode
& (IEXEC
| (IEXEC
>> 3) |
1266 (IEXEC
>> 6))) != 0 &&
1267 (ip
->i_mode
& (ISUID
| ISGID
)) != 0 &&
1268 secpolicy_vnode_setid_retain(cr
,
1269 (ip
->i_mode
& ISUID
) != 0 && ip
->i_uid
== 0) != 0) {
1271 * Clear Set-UID & Set-GID bits on
1272 * successful write if not privileged
1273 * and at least one of the execute bits
1274 * is set. If we always clear Set-GID,
1275 * mandatory file and record locking is
1278 ip
->i_mode
&= ~(ISUID
| ISGID
);
1282 * In the case the FDSYNC flag is set and this is a
1283 * "rewrite" we won't log a delta.
1284 * The FSYNC flag overrides all cases.
1286 if (!ufs_check_rewrite(ip
, uio
, ioflag
) || !(ioflag
& FDSYNC
)) {
1287 TRANS_INODE(ufsvfsp
, ip
);
1289 } while (error
== 0 && uio
->uio_resid
> 0 && n
!= 0);
1293 * Make sure i_seq is increased at least once per write
1297 ip
->i_flag
&= ~ISEQ
; /* no longer deferred */
	/*
	 * Inode is updated according to this table -
	 *
	 *	FSYNC	  FDSYNC(posix.4)
	 *	--------------------------
	 *	always@	  IATTCHG|IBDWRITE
	 *
	 * @ -	If we are doing synchronous write the only time we should
	 *	not be sync'ing the ip here is if we have the stickyhack
	 *	activated, the file is marked with the sticky bit and
	 *	no exec bit, the file length has not been changed and
	 *	no new blocks have been allocated during this write.
	 */
1314 if ((ip
->i_flag
& ISYNC
) != 0) {
1316 * we have eliminated nosync
1318 if ((ip
->i_flag
& (IATTCHG
|IBDWRITE
)) ||
1319 ((ioflag
& FSYNC
) && iupdat_flag
)) {
1325 * If we've already done a partial-write, terminate
1326 * the write but return no error unless the error is ENOSPC
1327 * because the caller can detect this and free resources and
1330 if ((start_resid
!= uio
->uio_resid
) && (error
!= ENOSPC
))
1333 ip
->i_flag
&= ~(INOACC
| ISYNC
);
/*
 * rdip does the real work of read requests for ufs.
 */
rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
	struct ufsvfs *ufsvfsp;
	long oresid = uio->uio_resid;
	u_offset_t n, on, mapon;
	int dofree, directio_status;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));
1363 ufsvfsp
= ip
->i_ufsvfs
;
1365 if (ufsvfsp
== NULL
)
1368 fs
= ufsvfsp
->vfs_fs
;
1370 /* check for valid filetype */
1371 type
= ip
->i_mode
& IFMT
;
1372 if ((type
!= IFREG
) && (type
!= IFDIR
) && (type
!= IFATTRDIR
) &&
1373 (type
!= IFLNK
) && (type
!= IFSHAD
)) {
1377 if (uio
->uio_loffset
> UFS_MAXOFFSET_T
) {
1381 if (uio
->uio_loffset
< (offset_t
)0) {
1384 if (uio
->uio_resid
== 0) {
1388 if (!ULOCKFS_IS_NOIACC(ITOUL(ip
)) && (fs
->fs_ronly
== 0) &&
1389 (!ufsvfsp
->vfs_noatime
)) {
1390 mutex_enter(&ip
->i_tlock
);
1392 mutex_exit(&ip
->i_tlock
);
1397 if (ip
->i_flag
& IDIRECTIO
|| ufsvfsp
->vfs_forcedirectio
) {
1398 error
= ufs_directio_read(ip
, uio
, cr
, &directio_status
);
1399 if (directio_status
== DIRECTIO_SUCCESS
)
1403 rwtype
= (rw_write_held(&ip
->i_contents
)?RW_WRITER
:RW_READER
);
1407 u_offset_t uoff
= uio
->uio_loffset
;
1408 off
= uoff
& (offset_t
)MAXBMASK
;
1409 mapon
= (u_offset_t
)(uoff
& (offset_t
)MAXBOFFSET
);
1410 on
= (u_offset_t
)blkoff(fs
, uoff
);
1411 n
= MIN((u_offset_t
)fs
->fs_bsize
- on
,
1412 (u_offset_t
)uio
->uio_resid
);
1414 diff
= ip
->i_size
- uoff
;
1416 if (diff
<= (offset_t
)0) {
1420 if (diff
< (offset_t
)n
)
		/*
		 * We update smallfile2 and smallfile1 at most every second.
		 */
		now = ddi_get_lbolt();
		if (now >= smallfile_update) {
			uint64_t percpufreeb;
			if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
			if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
			percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
			smallfile1 = percpufreeb / smallfile1_d;
			smallfile2 = percpufreeb / smallfile2_d;
			smallfile1 = MAX(smallfile1, smallfile);
			smallfile1 = MAX(smallfile1, smallfile64);
			smallfile2 = MAX(smallfile1, smallfile2);
			smallfile_update = now + hz;
1440 dofree
= freebehind
&&
1441 ip
->i_nextr
== (off
& PAGEMASK
) && off
> smallfile1
;
1444 * At this point we can enter ufs_getpage() in one of two
1446 * 1) segmap_getmapflt() calls ufs_getpage() when the
1447 * forcefault parameter is true (value of 1 is passed)
1448 * 2) uiomove() causes a page fault.
1450 * We cannot hold onto an i_contents reader lock without
1451 * risking deadlock in ufs_getpage() so drop a reader lock.
1452 * The ufs_getpage() dolock logic already allows for a
1453 * thread holding i_contents as writer to work properly
1454 * so we keep a writer lock.
1456 if (rwtype
== RW_READER
)
1457 rw_exit(&ip
->i_contents
);
1463 error
= vpm_data_copy(vp
, (off
+ mapon
), (uint_t
)n
,
1464 uio
, 1, NULL
, 0, S_READ
);
1466 base
= segmap_getmapflt(segkmap
, vp
, (off
+ mapon
),
1467 (uint_t
)n
, 1, S_READ
);
1468 error
= uiomove(base
+ mapon
, (long)n
, UIO_READ
, uio
);
1474 * If reading sequential we won't need this
1475 * buffer again soon. For offsets in range
1476 * [smallfile1, smallfile2] release the pages
1477 * at the tail of the cache list, larger
1478 * offsets are released at the head.
1481 flags
= SM_FREE
| SM_ASYNC
;
1482 if ((cache_read_ahead
== 0) &&
1484 flags
|= SM_DONTNEED
;
1487 * In POSIX SYNC (FSYNC and FDSYNC) read mode,
1488 * we want to make sure that the page which has
1489 * been read, is written on disk if it is dirty.
1490 * And corresponding indirect blocks should also
1493 if ((ioflag
& FRSYNC
) && (ioflag
& (FSYNC
|FDSYNC
))) {
1498 error
= vpm_sync_pages(vp
, off
, n
, flags
);
1500 error
= segmap_release(segkmap
, base
, flags
);
1504 (void) vpm_sync_pages(vp
, off
, n
, flags
);
1506 (void) segmap_release(segkmap
, base
, flags
);
1510 if (rwtype
== RW_READER
)
1511 rw_enter(&ip
->i_contents
, rwtype
);
1512 } while (error
== 0 && uio
->uio_resid
> 0 && n
!= 0);
	/*
	 * Inode is updated according to this table if FRSYNC is set.
	 *
	 *	FSYNC	  FDSYNC(posix.4)
	 *	--------------------------
	 *	always	  IATTCHG|IBDWRITE
	 *
	 * The inode is not updated if we're logging and the inode is a
	 * directory with FRSYNC, FSYNC and FDSYNC flags set.
	 */
1525 if (ioflag
& FRSYNC
) {
1526 if (TRANS_ISTRANS(ufsvfsp
) && ((ip
->i_mode
& IFMT
) == IFDIR
)) {
1530 if ((ioflag
& FSYNC
) ||
1531 ((ioflag
& FDSYNC
) &&
1532 (ip
->i_flag
& (IATTCHG
|IBDWRITE
)))) {
1538 * If we've already done a partial read, terminate
1539 * the read but return no error.
1541 if (oresid
!= uio
->uio_resid
)
1557 caller_context_t
*ct
)
1559 struct lockfs lockfs
, lockfs_out
;
1560 struct ufsvfs
*ufsvfsp
= VTOI(vp
)->i_ufsvfs
;
1561 char *comment
, *original_comment
;
1563 struct ulockfs
*ulp
;
1572 * forcibly unmounted
1574 if (ufsvfsp
== NULL
|| vp
->v_vfsp
== NULL
||
1575 vp
->v_vfsp
->vfs_flag
& VFS_UNMOUNTED
)
1577 fs
= ufsvfsp
->vfs_fs
;
1579 if (cmd
== Q_QUOTACTL
) {
1580 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_QUOTA_MASK
);
1585 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_QUOTA
,
1586 TOP_SETQUOTA_SIZE(fs
));
1589 error
= quotactl(vp
, arg
, flag
, cr
);
1592 TRANS_END_ASYNC(ufsvfsp
, TOP_QUOTA
,
1593 TOP_SETQUOTA_SIZE(fs
));
1594 ufs_lockfs_end(ulp
);
1602 * file system locking
1604 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1607 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1608 if (copyin((caddr_t
)arg
, &lockfs
,
1609 sizeof (struct lockfs
)))
1612 #ifdef _SYSCALL32_IMPL
1614 struct lockfs32 lockfs32
;
1615 /* Translate ILP32 lockfs to LP64 lockfs */
1616 if (copyin((caddr_t
)arg
, &lockfs32
,
1617 sizeof (struct lockfs32
)))
1619 lockfs
.lf_lock
= (ulong_t
)lockfs32
.lf_lock
;
1620 lockfs
.lf_flags
= (ulong_t
)lockfs32
.lf_flags
;
1621 lockfs
.lf_key
= (ulong_t
)lockfs32
.lf_key
;
1622 lockfs
.lf_comlen
= (ulong_t
)lockfs32
.lf_comlen
;
1624 (caddr_t
)(uintptr_t)lockfs32
.lf_comment
;
1626 #endif /* _SYSCALL32_IMPL */
1628 if (lockfs
.lf_comlen
) {
1629 if (lockfs
.lf_comlen
> LOCKFS_MAXCOMMENTLEN
)
1630 return (ENAMETOOLONG
);
1632 kmem_alloc(lockfs
.lf_comlen
, KM_SLEEP
);
1633 if (copyin(lockfs
.lf_comment
, comment
,
1634 lockfs
.lf_comlen
)) {
1635 kmem_free(comment
, lockfs
.lf_comlen
);
1638 original_comment
= lockfs
.lf_comment
;
1639 lockfs
.lf_comment
= comment
;
1641 if ((error
= ufs_fiolfs(vp
, &lockfs
, 0)) == 0) {
1642 lockfs
.lf_comment
= original_comment
;
1644 if ((flag
& DATAMODEL_MASK
) ==
1646 (void) copyout(&lockfs
, (caddr_t
)arg
,
1647 sizeof (struct lockfs
));
1649 #ifdef _SYSCALL32_IMPL
1651 struct lockfs32 lockfs32
;
1652 /* Translate LP64 to ILP32 lockfs */
1654 (uint32_t)lockfs
.lf_lock
;
1656 (uint32_t)lockfs
.lf_flags
;
1658 (uint32_t)lockfs
.lf_key
;
1659 lockfs32
.lf_comlen
=
1660 (uint32_t)lockfs
.lf_comlen
;
1661 lockfs32
.lf_comment
=
1662 (uint32_t)(uintptr_t)
1664 (void) copyout(&lockfs32
, (caddr_t
)arg
,
1665 sizeof (struct lockfs32
));
1667 #endif /* _SYSCALL32_IMPL */
1670 if (lockfs
.lf_comlen
)
1671 kmem_free(comment
, lockfs
.lf_comlen
);
1677 * get file system locking status
1680 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1681 if (copyin((caddr_t
)arg
, &lockfs
,
1682 sizeof (struct lockfs
)))
1685 #ifdef _SYSCALL32_IMPL
1687 struct lockfs32 lockfs32
;
1688 /* Translate ILP32 lockfs to LP64 lockfs */
1689 if (copyin((caddr_t
)arg
, &lockfs32
,
1690 sizeof (struct lockfs32
)))
1692 lockfs
.lf_lock
= (ulong_t
)lockfs32
.lf_lock
;
1693 lockfs
.lf_flags
= (ulong_t
)lockfs32
.lf_flags
;
1694 lockfs
.lf_key
= (ulong_t
)lockfs32
.lf_key
;
1695 lockfs
.lf_comlen
= (ulong_t
)lockfs32
.lf_comlen
;
1697 (caddr_t
)(uintptr_t)lockfs32
.lf_comment
;
1699 #endif /* _SYSCALL32_IMPL */
1701 if (error
= ufs_fiolfss(vp
, &lockfs_out
))
1703 lockfs
.lf_lock
= lockfs_out
.lf_lock
;
1704 lockfs
.lf_key
= lockfs_out
.lf_key
;
1705 lockfs
.lf_flags
= lockfs_out
.lf_flags
;
1706 lockfs
.lf_comlen
= MIN(lockfs
.lf_comlen
,
1707 lockfs_out
.lf_comlen
);
1709 if ((flag
& DATAMODEL_MASK
) == DATAMODEL_NATIVE
) {
1710 if (copyout(&lockfs
, (caddr_t
)arg
,
1711 sizeof (struct lockfs
)))
1714 #ifdef _SYSCALL32_IMPL
1716 /* Translate LP64 to ILP32 lockfs */
1717 struct lockfs32 lockfs32
;
1718 lockfs32
.lf_lock
= (uint32_t)lockfs
.lf_lock
;
1719 lockfs32
.lf_flags
= (uint32_t)lockfs
.lf_flags
;
1720 lockfs32
.lf_key
= (uint32_t)lockfs
.lf_key
;
1721 lockfs32
.lf_comlen
= (uint32_t)lockfs
.lf_comlen
;
1722 lockfs32
.lf_comment
=
1723 (uint32_t)(uintptr_t)lockfs
.lf_comment
;
1724 if (copyout(&lockfs32
, (caddr_t
)arg
,
1725 sizeof (struct lockfs32
)))
1728 #endif /* _SYSCALL32_IMPL */
1730 if (lockfs
.lf_comlen
&&
1731 lockfs
.lf_comment
&& lockfs_out
.lf_comment
)
1732 if (copyout(lockfs_out
.lf_comment
,
1733 lockfs
.lf_comment
, lockfs
.lf_comlen
))
1743 * if mounted w/o atime, return quietly.
1744 * I briefly thought about returning ENOSYS, but
1745 * figured that most apps would consider this fatal
1746 * but the idea is to make this as seamless as poss.
1748 if (ufsvfsp
->vfs_noatime
)
1751 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1752 ULOCKFS_SETATTR_MASK
);
1757 trans_size
= (int)TOP_SETATTR_SIZE(VTOI(vp
));
1758 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
,
1759 TOP_SETATTR
, trans_size
);
1762 error
= ufs_fiosatime(vp
, (struct timeval
*)arg
,
1766 TRANS_END_CSYNC(ufsvfsp
, error
, issync
,
1767 TOP_SETATTR
, trans_size
);
1768 ufs_lockfs_end(ulp
);
1776 return (ufs_fiosdio(vp
, (uint_t
*)arg
, flag
, cr
));
1782 return (ufs_fiogdio(vp
, (uint_t
*)arg
, flag
, cr
));
1788 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1793 error
= ufs_fioio(vp
, (struct fioio
*)arg
, flag
, cr
);
1796 ufs_lockfs_end(ulp
);
1802 * file system flush (push w/invalidate)
1804 if ((caddr_t
)arg
!= NULL
)
1806 return (ufs_fioffs(vp
, NULL
, cr
));
1810 * Contract-private interface for Legato
1811 * Purge this vnode from the DNLC and decide
1812 * if this vnode is busy (*arg == 1) or not
1815 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1817 error
= ufs_fioisbusy(vp
, (int *)arg
, cr
);
1821 return (ufs_fiodirectio(vp
, (int)arg
, cr
));
1825 * Tune the file system (aka setting fs attributes)
1827 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1828 ULOCKFS_SETATTR_MASK
);
1832 error
= ufs_fiotune(vp
, (struct fiotune
*)arg
, cr
);
1835 ufs_lockfs_end(ulp
);
1839 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1841 return (ufs_fiologenable(vp
, (void *)arg
, cr
, flag
));
1843 case _FIOLOGDISABLE
:
1844 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1846 return (ufs_fiologdisable(vp
, (void *)arg
, cr
, flag
));
1849 return (ufs_fioislog(vp
, (void *)arg
, cr
, flag
));
1851 case _FIOSNAPSHOTCREATE_MULTI
:
1853 struct fiosnapcreate_multi fc
, *fcp
;
1856 if (copyin((void *)arg
, &fc
, sizeof (fc
)))
1858 if (fc
.backfilecount
> MAX_BACKFILE_COUNT
)
1860 fcm_size
= sizeof (struct fiosnapcreate_multi
) +
1861 (fc
.backfilecount
- 1) * sizeof (int);
1862 fcp
= (struct fiosnapcreate_multi
*)
1863 kmem_alloc(fcm_size
, KM_SLEEP
);
1864 if (copyin((void *)arg
, fcp
, fcm_size
)) {
1865 kmem_free(fcp
, fcm_size
);
1868 error
= ufs_snap_create(vp
, fcp
, cr
);
1870 * Do copyout even if there is an error because
1871 * the details of error is stored in fcp.
1873 if (copyout(fcp
, (void *)arg
, fcm_size
))
1875 kmem_free(fcp
, fcm_size
);
1879 case _FIOSNAPSHOTDELETE
:
1881 struct fiosnapdelete fc
;
1883 if (copyin((void *)arg
, &fc
, sizeof (fc
)))
1885 error
= ufs_snap_delete(vp
, &fc
, cr
);
1886 if (!error
&& copyout(&fc
, (void *)arg
, sizeof (fc
)))
1891 case _FIOGETSUPERBLOCK
:
1892 if (copyout(fs
, (void *)arg
, SBSIZE
))
1896 case _FIOGETMAXPHYS
:
1897 if (copyout(&maxphys
, (void *)arg
, sizeof (maxphys
)))
1902 * The following 3 ioctls are for TSufs support
1903 * although could potentially be used elsewhere
1905 case _FIO_SET_LUFS_DEBUG
:
1906 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1908 lufs_debug
= (uint32_t)arg
;
1911 case _FIO_SET_LUFS_ERROR
:
1912 if (secpolicy_fs_config(cr
, ufsvfsp
->vfs_vfs
) != 0)
1914 TRANS_SETERROR(ufsvfsp
);
1917 case _FIO_GET_TOP_STATS
:
1919 fio_lufs_stats_t
*ls
;
1920 ml_unit_t
*ul
= ufsvfsp
->vfs_log
;
1922 ls
= kmem_zalloc(sizeof (*ls
), KM_SLEEP
);
1923 ls
->ls_debug
= ul
->un_debug
; /* return debug value */
1924 /* Copy stucture if statistics are being kept */
1925 if (ul
->un_logmap
->mtm_tops
) {
1926 ls
->ls_topstats
= *(ul
->un_logmap
->mtm_tops
);
1929 if (copyout(ls
, (void *)arg
, sizeof (*ls
)))
1931 kmem_free(ls
, sizeof (*ls
));
1935 case _FIO_SEEK_DATA
:
1936 case _FIO_SEEK_HOLE
:
1937 if (ddi_copyin((void *)arg
, &off
, sizeof (off
), flag
))
1939 /* offset paramater is in/out */
1940 error
= ufs_fio_holey(vp
, cmd
, &off
);
1943 if (ddi_copyout(&off
, (void *)arg
, sizeof (off
), flag
))
1947 case _FIO_COMPRESSED
:
1950 * This is a project private ufs ioctl() to mark
1951 * the inode as that belonging to a compressed
1952 * file. This is used to mark individual
1953 * compressed files in a miniroot archive.
1954 * The files compressed in this manner are
1955 * automatically decompressed by the dcfs filesystem
1956 * (via an interception in ufs_lookup - see decompvp())
1957 * which is layered on top of ufs on a system running
1958 * from the archive. See uts/common/fs/dcfs for details.
1959 * This ioctl only marks the file as compressed - the
1960 * actual compression is done by fiocompress (a
1961 * userland utility) which invokes this ioctl().
1963 struct inode
*ip
= VTOI(vp
);
1965 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
,
1966 ULOCKFS_SETATTR_MASK
);
1971 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_IUPDAT
,
1972 TOP_IUPDAT_SIZE(ip
));
1975 error
= ufs_mark_compressed(vp
);
1978 TRANS_END_ASYNC(ufsvfsp
, TOP_IUPDAT
,
1979 TOP_IUPDAT_SIZE(ip
));
1980 ufs_lockfs_end(ulp
);
ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
	struct cred *cr, caller_context_t *ct)
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
2002 if (vap
->va_mask
== AT_SIZE
) {
2004 * for performance, if only the size is requested don't bother
2005 * with anything else.
2007 UFS_GET_ISIZE(&vap
->va_size
, ip
);
2012 * inlined lockfs checks
2014 ufsvfsp
= ip
->i_ufsvfs
;
2015 if ((ufsvfsp
== NULL
) || ULOCKFS_IS_HLOCK(&ufsvfsp
->vfs_ulockfs
)) {
2020 rw_enter(&ip
->i_contents
, RW_READER
);
2022 * Return all the attributes. This should be refined so
2023 * that it only returns what's asked for.
2027 * Copy from inode table.
2029 vap
->va_type
= vp
->v_type
;
2030 vap
->va_mode
= ip
->i_mode
& MODEMASK
;
2032 * If there is an ACL and there is a mask entry, then do the
2033 * extra work that completes the equivalent of an acltomode(3)
2034 * call. According to POSIX P1003.1e, the acl mask should be
2035 * returned in the group permissions field.
2037 * - start with the original permission and mode bits (from above)
2038 * - clear the group owner bits
2039 * - add in the mask bits.
2041 if (ip
->i_ufs_acl
&& ip
->i_ufs_acl
->aclass
.acl_ismask
) {
2042 vap
->va_mode
&= ~((VREAD
| VWRITE
| VEXEC
) >> 3);
2044 (ip
->i_ufs_acl
->aclass
.acl_maskbits
& PERMMASK
) << 3;
2046 vap
->va_uid
= ip
->i_uid
;
2047 vap
->va_gid
= ip
->i_gid
;
2048 vap
->va_fsid
= ip
->i_dev
;
2049 vap
->va_nodeid
= (ino64_t
)ip
->i_number
;
2050 vap
->va_nlink
= ip
->i_nlink
;
2051 vap
->va_size
= ip
->i_size
;
2052 if (vp
->v_type
== VCHR
|| vp
->v_type
== VBLK
)
2053 vap
->va_rdev
= ip
->i_rdev
;
2055 vap
->va_rdev
= 0; /* not a b/c spec. */
2056 mutex_enter(&ip
->i_tlock
);
2057 ITIMES_NOLOCK(ip
); /* mark correct time in inode */
2058 vap
->va_seq
= ip
->i_seq
;
2059 vap
->va_atime
.tv_sec
= (time_t)ip
->i_atime
.tv_sec
;
2060 vap
->va_atime
.tv_nsec
= ip
->i_atime
.tv_usec
*1000;
2061 vap
->va_mtime
.tv_sec
= (time_t)ip
->i_mtime
.tv_sec
;
2062 vap
->va_mtime
.tv_nsec
= ip
->i_mtime
.tv_usec
*1000;
2063 vap
->va_ctime
.tv_sec
= (time_t)ip
->i_ctime
.tv_sec
;
2064 vap
->va_ctime
.tv_nsec
= ip
->i_ctime
.tv_usec
*1000;
2065 mutex_exit(&ip
->i_tlock
);
2067 switch (ip
->i_mode
& IFMT
) {
2070 vap
->va_blksize
= MAXBSIZE
; /* was BLKDEV_IOSIZE */
2074 vap
->va_blksize
= MAXBSIZE
;
2078 vap
->va_blksize
= ip
->i_fs
->fs_bsize
;
2081 vap
->va_nblocks
= (fsblkcnt64_t
)ip
->i_blocks
;
2082 rw_exit(&ip
->i_contents
);
/*
 * Special wrapper to provide a callback for secpolicy_vnode_setattr().
 * The i_contents lock is already held by the caller and we need to
 * declare the inode as 'void *' argument.
 */
ufs_priv_access(void *vip, int mode, struct cred *cr)
	struct inode *ip = vip;

	return (ufs_iaccess(ip, mode, cr, 0));
2109 caller_context_t
*ct
)
2111 struct inode
*ip
= VTOI(vp
);
2112 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
2114 struct ulockfs
*ulp
;
2118 long int mask
= vap
->va_mask
;
2133 * Cannot set these attributes.
2135 if ((mask
& AT_NOSET
) || (mask
& AT_XVATTR
))
2139 * check for forced unmount
2141 if (ufsvfsp
== NULL
)
2144 fs
= ufsvfsp
->vfs_fs
;
2145 if (fs
->fs_ronly
!= 0)
2155 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_SETATTR_MASK
);
2160 * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2161 * This follows the protocol for read()/write().
2163 if (vp
->v_type
!= VDIR
) {
2165 * ufs_tryirwlock uses rw_tryenter and checks for SLOCK to
2166 * avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
2167 * possible, retries the operation.
2169 ufs_tryirwlock(&ip
->i_rwlock
, RW_WRITER
, retry_file
);
2172 ufs_lockfs_end(ulp
);
2179 * Truncate file. Must have write permission and not be a directory.
2181 if (mask
& AT_SIZE
) {
2182 rw_enter(&ip
->i_contents
, RW_WRITER
);
2183 if (vp
->v_type
== VDIR
) {
2187 if (error
= ufs_iaccess(ip
, IWRITE
, cr
, 0))
2190 rw_exit(&ip
->i_contents
);
2191 error
= TRANS_ITRUNC(ip
, vap
->va_size
, 0, cr
);
2193 rw_enter(&ip
->i_contents
, RW_WRITER
);
2197 if (error
== 0 && vap
->va_size
)
2198 vnevent_truncate(vp
, ct
);
2202 trans_size
= (int)TOP_SETATTR_SIZE(ip
);
2203 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_SETATTR
, trans_size
);
2208 * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
2209 * This follows the protocol established by
2210 * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
2212 if (vp
->v_type
== VDIR
) {
2213 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_SETATTR
,
2221 * Grab quota lock if we are changing the file's owner.
2223 if (mask
& AT_UID
) {
2224 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
2227 rw_enter(&ip
->i_contents
, RW_WRITER
);
2229 oldva
.va_mode
= ip
->i_mode
;
2230 oldva
.va_uid
= ip
->i_uid
;
2231 oldva
.va_gid
= ip
->i_gid
;
2233 vap
->va_mask
&= ~AT_SIZE
;
2235 error
= secpolicy_vnode_setattr(cr
, vp
, vap
, &oldva
, flags
,
2236 ufs_priv_access
, ip
);
2240 mask
= vap
->va_mask
;
2243 * Change file access modes.
2245 if (mask
& AT_MODE
) {
2246 ip
->i_mode
= (ip
->i_mode
& IFMT
) | (vap
->va_mode
& ~IFMT
);
2247 TRANS_INODE(ufsvfsp
, ip
);
2250 mutex_enter(&vp
->v_lock
);
2251 if ((ip
->i_mode
& (ISVTX
| IEXEC
| IFDIR
)) == ISVTX
)
2252 vp
->v_flag
|= VSWAPLIKE
;
2254 vp
->v_flag
&= ~VSWAPLIKE
;
2255 mutex_exit(&vp
->v_lock
);
2258 if (mask
& (AT_UID
|AT_GID
)) {
2259 if (mask
& AT_UID
) {
2261 * Don't change ownership of the quota inode.
2263 if (ufsvfsp
->vfs_qinod
== ip
) {
2264 ASSERT(ufsvfsp
->vfs_qflags
& MQ_ENABLED
);
2270 * No real ownership change.
2272 if (ip
->i_uid
== vap
->va_uid
) {
2277 * Remove the blocks and the file, from the old user's
2281 blocks
= ip
->i_blocks
;
2284 (void) chkdq(ip
, -blocks
, /* force */ 1, cr
,
2285 (char **)NULL
, (size_t *)NULL
);
2286 (void) chkiq(ufsvfsp
, /* change */ -1, ip
,
2287 (uid_t
)ip
->i_uid
, /* force */ 1, cr
,
2288 (char **)NULL
, (size_t *)NULL
);
2289 dqrele(ip
->i_dquot
);
2292 ip
->i_uid
= vap
->va_uid
;
2295 * There is a real ownership change.
2299 * Add the blocks and the file to the new
2302 ip
->i_dquot
= getinoquota(ip
);
2303 (void) chkdq(ip
, blocks
, /* force */ 1, cr
,
2305 (void) chkiq(ufsvfsp
, /* change */ 1,
2306 (struct inode
*)NULL
, (uid_t
)ip
->i_uid
,
2307 /* force */ 1, cr
, &errmsg2
, &len2
);
2310 if (mask
& AT_GID
) {
2311 ip
->i_gid
= vap
->va_gid
;
2313 TRANS_INODE(ufsvfsp
, ip
);
	/*
	 * Change file access or modified times.
	 */
	if (mask & (AT_ATIME|AT_MTIME)) {
		/* Check that the time value is within ufs range */
		if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			error = EOVERFLOW;
			goto update_inode;
		}

		/*
		 * if the "noaccess" mount option is set and only atime
		 * update is requested, do nothing.  No error is returned.
		 */
		if ((ufsvfsp->vfs_noatime) &&
		    ((mask & (AT_ATIME|AT_MTIME)) == AT_ATIME))
			goto skip_atime;

		if (mask & AT_ATIME) {
			ip->i_atime.tv_sec = vap->va_atime.tv_sec;
			ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
			ip->i_flag &= ~IACC;
		}
		if (mask & AT_MTIME) {
			ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
			ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;
			gethrestime(&now);
			if (now.tv_sec > TIME32_MAX) {
				/*
				 * In 2038, ctime sticks forever..
				 */
				ip->i_ctime.tv_sec = TIME32_MAX;
				ip->i_ctime.tv_usec = 0;
			} else {
				ip->i_ctime.tv_sec = now.tv_sec;
				ip->i_ctime.tv_usec = now.tv_nsec / 1000;
			}
			ip->i_flag &= ~(IUPD|ICHG);
			ip->i_flag |= IMODTIME;
		}
		TRANS_INODE(ufsvfsp, ip);
		ip->i_flag |= IMOD;
	}

skip_atime:
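	/*
	 * The on-disk UFS timestamps hold microseconds, so the nanosecond
	 * values from the vattr are divided by 1000, e.g.
	 *
	 *	va_mtime.tv_nsec = 123456789  ->  i_mtime.tv_usec = 123456
	 *
	 * and because the on-disk seconds field is 32 bits wide, ctime is
	 * pinned at TIME32_MAX once the current time passes 2038.
	 */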
	/*
	 * The presence of a shadow inode may indicate an ACL, but does
	 * not imply an ACL.  Future FSD types should be handled here too
	 * and check for the presence of the attribute-specific data
	 * before referencing it.
	 */
	if (ip->i_shadow) {
		/*
		 * XXX if ufs_iupdat is changed to sandbagged write fix
		 * ufs_acl_setattr to push ip to keep acls consistent
		 *
		 * Suppress out of inodes messages if we will retry.
		 */
		if (retry)
			ip->i_flag |= IQUIET;
		error = ufs_acl_setattr(ip, vap, cr);
		ip->i_flag &= ~IQUIET;
	}

update_inode:
	/*
	 * Setattr always increases the sequence number
	 */
	ip->i_seq++;

	/*
	 * if nfsd and not logging; push synchronously
	 */
	if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {
		ufs_iupdat(ip, 1);
	} else {
		ITIMES_NOLOCK(ip);
	}

	rw_exit(&ip->i_contents);
	if (dodqlock)
		rw_exit(&ufsvfsp->vfs_dqrwlock);
	if (dorwlock)
		rw_exit(&ip->i_rwlock);

	if (ulp) {
		if (dotrans) {
			int terr = 0;
			TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SETATTR,
			    trans_size);
			if (error == 0)
				error = terr;
		}
		ufs_lockfs_end(ulp);
	}
out:
	/*
	 * If out of inodes or blocks, see if we can free something
	 * up from the delete queue.
	 */
	if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		if (errmsg1 != NULL)
			kmem_free(errmsg1, len1);
		if (errmsg2 != NULL)
			kmem_free(errmsg2, len2);
		goto again;
	}
	if (errmsg1 != NULL) {
		uprintf(errmsg1);
		kmem_free(errmsg1, len1);
	}
	if (errmsg2 != NULL) {
		uprintf(errmsg2);
		kmem_free(errmsg2, len2);
	}
	return (error);
}
/*ARGSUSED*/
static int
ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	/*
	 * The ufs_iaccess function wants to be called with
	 * mode bits expressed as "ufs specific" bits.
	 * I.e., VWRITE|VREAD|VEXEC do not make sense to
	 * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
	 * But since they're the same we just pass the vnode mode
	 * bits and verify that assumption at compile time.
	 */
#if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
#error "ufs_access needs to map Vmodes to Imodes"
#endif
	return (ufs_iaccess(ip, mode, cr, 1));
}
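/*
 * The #if/#error pair in ufs_access() is a compile-time guard: it relies on
 * the V* and I* permission bits having identical values on this platform, so
 * the vnode-layer mode can be handed to ufs_iaccess() unchanged.  If the
 * definitions ever diverge, the build fails instead of the code silently
 * checking the wrong bits.
 */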
/* ARGSUSED */
static int
ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	struct ulockfs *ulp;
	int error;
	int fastsymlink;

	if (vp->v_type != VLNK) {
		error = EINVAL;
		goto nolockout;
	}

	/*
	 * If the symbolic link is empty there is nothing to read.
	 * Fast-track these empty symbolic links
	 */
	if (ip->i_size == 0) {
		error = 0;
		goto nolockout;
	}

	ufsvfsp = ip->i_ufsvfs;
	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);
	if (error)
		goto nolockout;
	/*
	 * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
	 */
again:
	fastsymlink = 0;
	if (ip->i_flag & IFASTSYMLNK) {
		rw_enter(&ip->i_rwlock, RW_READER);
		rw_enter(&ip->i_contents, RW_READER);
		if (ip->i_flag & IFASTSYMLNK) {
			if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
			    (ip->i_fs->fs_ronly == 0) &&
			    (!ufsvfsp->vfs_noatime)) {
				mutex_enter(&ip->i_tlock);
				ip->i_flag |= IACC;
				mutex_exit(&ip->i_tlock);
			}
			error = uiomove((caddr_t)&ip->i_db[1],
			    MIN(ip->i_size, uiop->uio_resid),
			    UIO_READ, uiop);
			ITIMES(ip);
			++fastsymlink;
		}
		rw_exit(&ip->i_contents);
		rw_exit(&ip->i_rwlock);
	}
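	/*
	 * Fast symlinks keep the target string in the inode itself,
	 * starting at i_db[1], rather than in a data block; that is why the
	 * fast path above copies straight out of the inode while holding
	 * only reader locks.  Only links no longer than FSL_SIZE bytes can
	 * be cached this way.
	 */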
	if (!fastsymlink) {
		ssize_t size;	/* number of bytes read  */
		caddr_t basep;	/* pointer to input data */
		ino_t ino;
		long  igen;
		struct uio tuio;	/* temp uio struct */
		struct uio *tuiop;
		iovec_t tiov;		/* temp iovec struct */
		char kbuf[FSL_SIZE];	/* buffer to hold fast symlink */
		int tflag = 0;		/* flag to indicate temp vars used */

		ino = ip->i_number;
		igen = ip->i_gen;
		size = uiop->uio_resid;
		basep = uiop->uio_iov->iov_base;
		tuiop = uiop;

		rw_enter(&ip->i_rwlock, RW_WRITER);
		rw_enter(&ip->i_contents, RW_WRITER);
		if (ip->i_flag & IFASTSYMLNK) {
			rw_exit(&ip->i_contents);
			rw_exit(&ip->i_rwlock);
			goto again;
		}

		/* can this be a fast symlink and is it a user buffer? */
		if (ip->i_size <= FSL_SIZE &&
		    (uiop->uio_segflg == UIO_USERSPACE ||
		    uiop->uio_segflg == UIO_USERISPACE)) {

			bzero(&tuio, sizeof (struct uio));
			/*
			 * setup a kernel buffer to read link into.  this
			 * is to fix a race condition where the user buffer
			 * got corrupted before copying it into the inode.
			 */
			size = ip->i_size;
			tiov.iov_len = size;
			tiov.iov_base = kbuf;
			tuio.uio_iov = &tiov;
			tuio.uio_iovcnt = 1;
			tuio.uio_offset = uiop->uio_offset;
			tuio.uio_segflg = UIO_SYSSPACE;
			tuio.uio_fmode = uiop->uio_fmode;
			tuio.uio_extflg = uiop->uio_extflg;
			tuio.uio_limit = uiop->uio_limit;
			tuio.uio_resid = size;

			basep = tuio.uio_iov->iov_base;
			tuiop = &tuio;
			tflag = 1;
		}

		error = rdip(ip, tuiop, 0, cr);
		if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
			rw_exit(&ip->i_contents);
			rw_exit(&ip->i_rwlock);
			goto out;
		}

		if (tflag == 0)
			size -= uiop->uio_resid;

		if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
		    ip->i_size == size) || (tflag == 1 &&
		    tuio.uio_resid == 0)) {
			error = kcopy(basep, &ip->i_db[1], ip->i_size);
			if (error == 0) {
				ip->i_flag |= IFASTSYMLNK;
				/*
				 * free page
				 */
				(void) VOP_PUTPAGE(ITOV(ip),
				    (offset_t)0, PAGESIZE,
				    (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),
				    cr, ct);
			} else {
				int i;
				/* error, clear garbage left behind */
				for (i = 1; i < NDADDR; i++)
					ip->i_db[i] = 0;

				for (i = 0; i < NIADDR; i++)
					ip->i_ib[i] = 0;
			}
		}

		if (tflag == 1) {
			/* now, copy it into the user buffer */
			error = uiomove((caddr_t)kbuf,
			    MIN(size, uiop->uio_resid),
			    UIO_READ, uiop);
		}
		rw_exit(&ip->i_contents);
		rw_exit(&ip->i_rwlock);
	}
out:
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
nolockout:
	return (error);
}
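/*
 * Reading through rdip() is the slow path for ufs_readlink(); when the link
 * turns out to be short enough, the code above opportunistically copies it
 * back into i_db[1] and sets IFASTSYMLNK so later readlink() calls can take
 * the fast path, then pushes out any cached page that is no longer needed.
 */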
/*ARGSUSED*/
static int
ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct ulockfs *ulp;
	int error;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);
	if (error)
		return (error);

	if (TRANS_ISTRANS(ufsvfsp)) {
		/*
		 * First push out any data pages
		 */
		if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
		    (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
			error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
			    0, CRED(), ct);
			if (error)
				goto out;
		}

		/*
		 * Delta any delayed inode times updates
		 * and push inode to log.
		 * All other inode deltas will have already been delta'd
		 * and will be pushed during the commit.
		 */
		if (!(syncflag & FDSYNC) &&
		    ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {
			if (ulp) {
				TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,
				    TOP_SYNCIP_SIZE);
			}

			rw_enter(&ip->i_contents, RW_READER);
			mutex_enter(&ip->i_tlock);
			ip->i_flag &= ~IMODTIME;
			mutex_exit(&ip->i_tlock);
			ufs_iupdat(ip, I_SYNC);
			rw_exit(&ip->i_contents);
			if (ulp) {
				TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,
				    TOP_SYNCIP_SIZE);
			}
		}

		/*
		 * Commit the Moby transaction
		 *
		 * Deltas have already been made so we just need to
		 * commit them with a synchronous transaction.
		 * TRANS_BEGIN_SYNC() will return an error
		 * if there are no deltas to commit, for an
		 * empty transaction.
		 */
		if (ulp) {
			TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,
			    error);
			if (error) {
				error = 0; /* commit wasn't needed */
				goto out;
			}
			TRANS_END_SYNC(ufsvfsp, error, TOP_FSYNC,
			    TOP_COMMIT_SIZE);
		}
	} else {	/* not logging */
		if (!(IS_SWAPVP(vp)))
			if (syncflag & FNODSYNC) {
				/* Just update the inode only */
				TRANS_IUPDAT(ip, 1);
				error = 0;
			} else if (syncflag & FDSYNC)
				/* Do data-synchronous writes */
				error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
			else
				/* Do synchronous writes */
				error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);

		rw_enter(&ip->i_contents, RW_WRITER);
		if (!error)
			error = ufs_sync_indir(ip);
		rw_exit(&ip->i_contents);
	}
out:
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
	return (error);
}

/*ARGSUSED*/
static void
ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
{
	ufs_iinactive(VTOI(vp));
}

/*
 * Unix file system operations having to do with directory manipulation.
 */
int ufs_lookup_idle_count = 2;	/* Number of inodes to idle each time */
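/*
 * Idle-queue throttle, in outline: before allocating another inode,
 * ufs_lookup() pushes ufs_lookup_idle_count inodes (2 by default) off the
 * idle queue once it has grown past its high-water mark, so the queue does
 * not grow without bound.  The count is a plain global and in effect a
 * tunable, not a fixed protocol constant.
 */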
2725 ufs_lookup(struct vnode
*dvp
, char *nm
, struct vnode
**vpp
,
2726 struct pathname
*pnp
, int flags
, struct vnode
*rdir
, struct cred
*cr
,
2727 caller_context_t
*ct
, int *direntflags
, pathname_t
*realpnp
)
2732 struct ufsvfs
*ufsvfsp
;
2733 struct ulockfs
*ulp
;
2738 * Check flags for type of lookup (regular file or attribute file)
2743 if (flags
& LOOKUP_XATTR
) {
2746 * If not mounted with XATTR support then return EINVAL
2749 if (!(ip
->i_ufsvfs
->vfs_vfs
->vfs_flag
& VFS_XATTR
))
2752 * We don't allow recursive attributes...
2753 * Maybe someday we will.
2755 if ((ip
->i_cflags
& IXATTR
)) {
2759 if ((vp
= dnlc_lookup(dvp
, XATTR_DIR_NAME
)) == NULL
) {
2760 error
= ufs_xattr_getattrdir(dvp
, &sip
, flags
, cr
);
2767 dnlc_update(dvp
, XATTR_DIR_NAME
, vp
);
2771 * Check accessibility of directory.
2773 if (vp
== DNLC_NO_VNODE
) {
2778 if ((error
= ufs_iaccess(VTOI(vp
), IEXEC
, cr
, 1)) != 0) {
2788 * Check for a null component, which we should treat as
2789 * looking at dvp from within it's parent, so we don't
2790 * need a call to ufs_iaccess(), as it has already been
2801 * Check for "." ie itself. this is a quick check and
2802 * avoids adding "." into the dnlc (which have been seen
2803 * to occupy >10% of the cache).
2805 if ((nm
[0] == '.') && (nm
[1] == 0)) {
2807 * Don't return without checking accessibility
2808 * of the directory. We only need the lock if
2809 * we are going to return it.
2811 if ((error
= ufs_iaccess(ip
, IEXEC
, cr
, 1)) == 0) {
2819 * Fast path: Check the directory name lookup cache.
2821 if (vp
= dnlc_lookup(dvp
, nm
)) {
2823 * Check accessibility of directory.
2825 if ((error
= ufs_iaccess(ip
, IEXEC
, cr
, 1)) != 0) {
2829 if (vp
== DNLC_NO_VNODE
) {
2840 * Keep the idle queue from getting too long by
2841 * idling two inodes before attempting to allocate another.
2842 * This operation must be performed before entering
2843 * lockfs or a transaction.
2845 if (ufs_idle_q
.uq_ne
> ufs_idle_q
.uq_hiwat
)
2846 if ((curthread
->t_flag
& T_DONTBLOCK
) == 0) {
2847 ins
.in_lidles
.value
.ul
+= ufs_lookup_idle_count
;
2848 ufs_idle_some(ufs_lookup_idle_count
);
2853 * Check accessibility of directory.
2855 if (error
= ufs_diraccess(ip
, IEXEC
, cr
))
2858 ufsvfsp
= ip
->i_ufsvfs
;
2859 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_LOOKUP_MASK
);
2863 error
= ufs_dirlook(ip
, nm
, &xip
, cr
, 1, 0);
2871 * If vnode is a device return special vnode instead.
2873 if (IS_DEVVP(*vpp
)) {
2874 struct vnode
*newvp
;
2876 newvp
= specvp(*vpp
, (*vpp
)->v_rdev
, (*vpp
)->v_type
,
2883 } else if (ip
->i_cflags
& ICOMPRESS
) {
2884 struct vnode
*newvp
;
2887 * Compressed file, substitute dcfs vnode
2889 newvp
= decompvp(*vpp
, cr
, ct
);
2898 ufs_lockfs_end(ulp
);
2901 if (error
== EAGAIN
)
2910 ufs_create(struct vnode
*dvp
, char *name
, struct vattr
*vap
, enum vcexcl excl
,
2911 int mode
, struct vnode
**vpp
, struct cred
*cr
, int flag
,
2912 caller_context_t
*ct
, vsecattr_t
*vsecp
)
2918 struct ufsvfs
*ufsvfsp
;
2919 struct ulockfs
*ulp
;
2925 int defer_dip_seq_update
= 0; /* need to defer update of dip->i_seq */
2931 ufsvfsp
= ip
->i_ufsvfs
;
2934 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_CREATE_MASK
);
2939 trans_size
= (int)TOP_CREATE_SIZE(ip
);
2940 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_CREATE
, trans_size
);
2943 if ((vap
->va_mode
& VSVTX
) && secpolicy_vnode_stky_modify(cr
) != 0)
2944 vap
->va_mode
&= ~VSVTX
;
2946 if (*name
== '\0') {
2948 * Null component name refers to the directory itself.
2952 * Even though this is an error case, we need to grab the
2953 * quota lock since the error handling code below is common.
2955 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
2956 rw_enter(&ip
->i_contents
, RW_WRITER
);
2962 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
2963 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
2964 * possible, retries the operation.
2966 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_CREATE
,
2971 xvp
= dnlc_lookup(dvp
, name
);
2972 if (xvp
== DNLC_NO_VNODE
) {
2978 rw_exit(&ip
->i_rwlock
);
2979 if (error
= ufs_iaccess(ip
, IEXEC
, cr
, 1)) {
2987 * Suppress file system full message if we will retry
2989 error
= ufs_direnter_cm(ip
, name
, DE_CREATE
,
2990 vap
, &xip
, cr
, (noentry
| (retry
? IQUIET
: 0)));
2991 if (error
== EAGAIN
) {
2993 TRANS_END_CSYNC(ufsvfsp
, error
, issync
,
2994 TOP_CREATE
, trans_size
);
2995 ufs_lockfs_end(ulp
);
2999 rw_exit(&ip
->i_rwlock
);
3003 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
3004 rw_enter(&ip
->i_contents
, RW_WRITER
);
3009 * If the file already exists and this is a non-exclusive create,
3010 * check permissions and allow access for non-directories.
3011 * Read-only create of an existing directory is also allowed.
3012 * We fail an exclusive create of anything which already exists.
3014 if (error
== EEXIST
) {
3016 if (excl
== NONEXCL
) {
3017 if ((((ip
->i_mode
& IFMT
) == IFDIR
) ||
3018 ((ip
->i_mode
& IFMT
) == IFATTRDIR
)) &&
3022 error
= ufs_iaccess(ip
, mode
, cr
, 0);
3027 rw_exit(&ip
->i_contents
);
3028 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
3033 * If the error EEXIST was set, then i_seq can not
3034 * have been updated. The sequence number interface
3035 * is defined such that a non-error VOP_CREATE must
3036 * increase the dir va_seq it by at least one. If we
3037 * have cleared the error, increase i_seq. Note that
3038 * we are increasing the dir i_seq and in rare cases
3039 * ip may actually be from the dvp, so we already have
3040 * the locks and it will not be subject to truncation.
3041 * In case we have to update i_seq of the parent
3042 * directory dip, we have to defer it till we have
3043 * released our locks on ip due to lock ordering requirements.
3046 defer_dip_seq_update
= 1;
3050 if (((ip
->i_mode
& IFMT
) == IFREG
) &&
3051 (vap
->va_mask
& AT_SIZE
) && vap
->va_size
== 0) {
3053 * Truncate regular files, if requested by caller.
3054 * Grab i_rwlock to make sure no one else is
3055 * currently writing to the file (we promised
3056 * bmap we would do this).
3057 * Must get the locks in the correct order.
3059 if (ip
->i_size
== 0) {
3060 ip
->i_flag
|= ICHG
| IUPD
;
3062 TRANS_INODE(ufsvfsp
, ip
);
3065 * Large Files: Why this check here?
3066 * Though we do it in vn_create() we really
3067 * want to guarantee that we do not destroy
3068 * Large file data by atomically checking
3069 * the size while holding the contents
3072 if (flag
&& !(flag
& FOFFMAX
) &&
3073 ((ip
->i_mode
& IFMT
) == IFREG
) &&
3074 (ip
->i_size
> (offset_t
)MAXOFF32_T
)) {
3075 rw_exit(&ip
->i_contents
);
3076 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
3080 if (TRANS_ISTRANS(ufsvfsp
))
3083 rw_exit(&ip
->i_contents
);
3084 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
3085 ufs_tryirwlock_trans(&ip
->i_rwlock
,
3086 RW_WRITER
, TOP_CREATE
,
3092 rw_enter(&ufsvfsp
->vfs_dqrwlock
,
3094 rw_enter(&ip
->i_contents
, RW_WRITER
);
3095 (void) ufs_itrunc(ip
, (u_offset_t
)0, 0,
3097 rw_exit(&ip
->i_rwlock
);
3102 vnevent_create(ITOV(ip
), ct
);
3109 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
3110 rw_exit(&ip
->i_contents
);
3117 rw_exit(&ip
->i_contents
);
3118 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
3121 * If vnode is a device return special vnode instead.
3123 if (!error
&& IS_DEVVP(*vpp
)) {
3124 struct vnode
*newvp
;
3126 newvp
= specvp(*vpp
, (*vpp
)->v_rdev
, (*vpp
)->v_type
, cr
);
3128 if (newvp
== NULL
) {
3138 * Do the deferred update of the parent directory's sequence
3141 if (defer_dip_seq_update
== 1) {
3142 rw_enter(&dip
->i_contents
, RW_READER
);
3143 mutex_enter(&dip
->i_tlock
);
3145 mutex_exit(&dip
->i_tlock
);
3146 rw_exit(&dip
->i_contents
);
3152 TRANS_END_CSYNC(ufsvfsp
, terr
, issync
, TOP_CREATE
,
3156 * If we haven't had a more interesting failure
3157 * already, then anything that might've happened
3158 * here should be reported.
3164 if (!error
&& truncflag
) {
3165 ufs_tryirwlock(&ip
->i_rwlock
, RW_WRITER
, retry_trunc
);
3168 ufs_lockfs_end(ulp
);
3172 (void) TRANS_ITRUNC(ip
, (u_offset_t
)0, 0, cr
);
3173 rw_exit(&ip
->i_rwlock
);
3177 ufs_lockfs_end(ulp
);
3180 * If no inodes available, try to free one up out of the
3181 * pending delete queue.
3183 if ((error
== ENOSPC
) && retry
&& TRANS_ISTRANS(ufsvfsp
)) {
3184 ufs_delete_drain_wait(ufsvfsp
, 1);
3193 extern int ufs_idle_max
;
3196 ufs_remove(struct vnode
*vp
, char *nm
, struct cred
*cr
,
3197 caller_context_t
*ct
, int flags
)
3199 struct inode
*ip
= VTOI(vp
);
3200 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
3201 struct ulockfs
*ulp
;
3202 vnode_t
*rmvp
= NULL
; /* Vnode corresponding to name being removed */
3209 * don't let the delete queue get too long
3211 if (ufsvfsp
== NULL
) {
3215 if (ufsvfsp
->vfs_delete
.uq_ne
> ufs_idle_max
)
3216 ufs_delete_drain(vp
->v_vfsp
, 1, 1);
3218 error
= ufs_eventlookup(vp
, nm
, cr
, &rmvp
);
3220 /* Only send the event if there were no errors */
3222 vnevent_remove(rmvp
, vp
, nm
, ct
);
3227 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_REMOVE_MASK
);
3232 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_REMOVE
,
3233 trans_size
= (int)TOP_REMOVE_SIZE(VTOI(vp
)));
3236 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3237 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
3238 * possible, retries the operation.
3240 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_REMOVE
, retry
);
3243 error
= ufs_dirremove(ip
, nm
, (struct inode
*)0, (struct vnode
*)0,
3245 rw_exit(&ip
->i_rwlock
);
3248 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_REMOVE
, trans_size
);
3249 ufs_lockfs_end(ulp
);
3257 * Link a file or a directory. Only privileged processes are allowed to
3258 * make links to directories.
3262 ufs_link(struct vnode
*tdvp
, struct vnode
*svp
, char *tnm
, struct cred
*cr
,
3263 caller_context_t
*ct
, int flags
)
3266 struct inode
*tdp
= VTOI(tdvp
);
3267 struct ufsvfs
*ufsvfsp
= tdp
->i_ufsvfs
;
3268 struct ulockfs
*ulp
;
3269 struct vnode
*realvp
;
3277 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_LINK_MASK
);
3282 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_LINK
,
3283 trans_size
= (int)TOP_LINK_SIZE(VTOI(tdvp
)));
3285 if (VOP_REALVP(svp
, &realvp
, ct
) == 0)
3289 * Make sure link for extended attributes is valid
3290 * We only support hard linking of attr in ATTRDIR to ATTRDIR
3292 * Make certain we don't attempt to look at a device node as
3296 isdev
= IS_DEVVP(svp
);
3297 if (((isdev
== 0) && ((VTOI(svp
)->i_cflags
& IXATTR
) == 0) &&
3298 ((tdp
->i_mode
& IFMT
) == IFATTRDIR
)) ||
3299 ((isdev
== 0) && (VTOI(svp
)->i_cflags
& IXATTR
) &&
3300 ((tdp
->i_mode
& IFMT
) == IFDIR
))) {
3306 if ((svp
->v_type
== VDIR
&&
3307 secpolicy_fs_linkdir(cr
, ufsvfsp
->vfs_vfs
) != 0) ||
3308 (sip
->i_uid
!= crgetuid(cr
) && secpolicy_basic_link(cr
) != 0)) {
3314 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3315 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
3316 * possible, retries the operation.
3318 ufs_tryirwlock_trans(&tdp
->i_rwlock
, RW_WRITER
, TOP_LINK
, retry
);
3321 error
= ufs_direnter_lr(tdp
, tnm
, DE_LINK
, (struct inode
*)0,
3323 rw_exit(&tdp
->i_rwlock
);
3327 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_LINK
, trans_size
);
3328 ufs_lockfs_end(ulp
);
3332 vnevent_link(svp
, ct
);
3338 uint64_t ufs_rename_retry_cnt
;
3339 uint64_t ufs_rename_upgrade_retry_cnt
;
3340 uint64_t ufs_rename_dircheck_retry_cnt
;
3341 clock_t ufs_rename_backoff_delay
= 1;
3344 * Rename a file or directory.
3345 * We are given the vnode and entry string of the source and the
3346 * vnode and entry string of the place we want to move the source
3347 * to (the target). The essential operation is:
3349 * link(source, target);
3351 * but "atomically". Can't do full commit without saving state in
3352 * the inode on disk, which isn't feasible at this time. Best we
3353 * can do is always guarantee that the TARGET exists.
3359 struct vnode
*sdvp
, /* old (source) parent vnode */
3360 char *snm
, /* old (source) entry name */
3361 struct vnode
*tdvp
, /* new (target) parent vnode */
3362 char *tnm
, /* new (target) entry name */
3364 caller_context_t
*ct
,
3367 struct inode
*sip
= NULL
; /* source inode */
3368 struct inode
*ip
= NULL
; /* check inode */
3369 struct inode
*sdp
; /* old (source) parent inode */
3370 struct inode
*tdp
; /* new (target) parent inode */
3371 struct vnode
*svp
= NULL
; /* source vnode */
3372 struct vnode
*tvp
= NULL
; /* target vnode, if it exists */
3373 struct vnode
*realvp
;
3374 struct ufsvfs
*ufsvfsp
;
3375 struct ulockfs
*ulp
= NULL
;
3376 struct ufs_slot slot
;
3381 krwlock_t
*first_lock
;
3382 krwlock_t
*second_lock
;
3383 krwlock_t
*reverse_lock
;
3388 ufsvfsp
= sdp
->i_ufsvfs
;
3390 if (VOP_REALVP(tdvp
, &realvp
, ct
) == 0)
3393 /* Must do this before taking locks in case of DNLC miss */
3394 terr
= ufs_eventlookup(tdvp
, tnm
, cr
, &tvp
);
3395 serr
= ufs_eventlookup(sdvp
, snm
, cr
, &svp
);
3397 if ((serr
== 0) && ((terr
== 0) || (terr
== ENOENT
))) {
3399 vnevent_pre_rename_dest(tvp
, tdvp
, tnm
, ct
);
3402 * Notify the target directory of the rename event
3403 * if source and target directories are not the same.
3406 vnevent_pre_rename_dest_dir(tdvp
, svp
, tnm
, ct
);
3409 vnevent_pre_rename_src(svp
, sdvp
, snm
, ct
);
3416 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_RENAME_MASK
);
3421 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_RENAME
,
3422 trans_size
= (int)TOP_RENAME_SIZE(sdp
));
3424 if (VOP_REALVP(tdvp
, &realvp
, ct
) == 0)
3430 * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
3432 if ((tdp
->i_mode
& IFMT
) != (sdp
->i_mode
& IFMT
)) {
3438 * Check accessibility of directory.
3440 if (error
= ufs_diraccess(sdp
, IEXEC
, cr
))
3444 * Look up inode of file we're supposed to rename.
3447 if (error
= ufs_dirlook(sdp
, snm
, &sip
, cr
, 0, 0)) {
3448 if (error
== EAGAIN
) {
3450 TRANS_END_CSYNC(ufsvfsp
, error
, issync
,
3451 TOP_RENAME
, trans_size
);
3452 ufs_lockfs_end(ulp
);
3461 * Lock both the source and target directories (they may be
3462 * the same) to provide the atomicity semantics that was
3463 * previously provided by the per file system vfs_rename_lock
3465 * with vfs_rename_lock removed to allow simultaneous renames
3466 * within a file system, ufs_dircheckpath can deadlock while
3467 * traversing back to ensure that source is not a parent directory
3468 * of target parent directory. This is because we get into
3469 * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
3470 * If the tdp and sdp of the simultaneous renames happen to be
3471 * in the path of each other, it can lead to a deadlock. This
3472 * can be avoided by getting the locks as RW_READER here and then
3473 * upgrading to RW_WRITER after completing the ufs_dircheckpath.
3475 * We hold the target directory's i_rwlock after calling
3476 * ufs_lockfs_begin but in many other operations (like ufs_readdir)
3477 * VOP_RWLOCK is explicitly called by the filesystem independent code
3478 * before calling the file system operation. In these cases the order
3479 * is reversed (i.e i_rwlock is taken first and then ufs_lockfs_begin
3480 * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
3481 * counter but with ufs_quiesce setting the SLOCK bit this becomes a
3482 * synchronizing object which might lead to a deadlock. So we use
3483 * rw_tryenter instead of rw_enter. If we fail to get this lock and
3484 * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
3488 first_lock
= &tdp
->i_rwlock
;
3489 second_lock
= &sdp
->i_rwlock
;
3491 if (!rw_tryenter(first_lock
, RW_READER
)) {
3493 * We didn't get the lock. Check if the SLOCK is set in the
3494 * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3495 * and wait for SLOCK to be cleared.
3498 if (ulp
&& ULOCKFS_IS_SLOCK(ulp
)) {
3499 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_RENAME
,
3501 ufs_lockfs_end(ulp
);
3506 * SLOCK isn't set so this is a genuine synchronization
3507 * case. Let's try again after giving them a breather.
3509 delay(RETRY_LOCK_DELAY
);
3510 goto retry_firstlock
;
3514 * Need to check if the tdp and sdp are same !!!
3516 if ((tdp
!= sdp
) && (!rw_tryenter(second_lock
, RW_READER
))) {
3518 * We didn't get the lock. Check if the SLOCK is set in the
3519 * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3520 * and wait for SLOCK to be cleared.
3523 rw_exit(first_lock
);
3524 if (ulp
&& ULOCKFS_IS_SLOCK(ulp
)) {
3525 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_RENAME
,
3527 ufs_lockfs_end(ulp
);
3532 * So we couldn't get the second level peer lock *and*
3533 * the SLOCK bit isn't set. Too bad we can be
3534 * contentding with someone wanting these locks otherway
3535 * round. Reverse the locks in case there is a heavy
3536 * contention for the second level lock.
3538 reverse_lock
= first_lock
;
3539 first_lock
= second_lock
;
3540 second_lock
= reverse_lock
;
3541 ufs_rename_retry_cnt
++;
3542 goto retry_firstlock
;
3551 * Make sure we can delete the source entry. This requires
3552 * write permission on the containing directory.
3553 * Check for sticky directories.
3555 rw_enter(&sdp
->i_contents
, RW_READER
);
3556 rw_enter(&sip
->i_contents
, RW_READER
);
3557 if ((error
= ufs_iaccess(sdp
, IWRITE
, cr
, 0)) != 0 ||
3558 (error
= ufs_sticky_remove_access(sdp
, sip
, cr
)) != 0) {
3559 rw_exit(&sip
->i_contents
);
3560 rw_exit(&sdp
->i_contents
);
3565 * If this is a rename of a directory and the parent is
3566 * different (".." must be changed), then the source
3567 * directory must not be in the directory hierarchy
3568 * above the target, as this would orphan everything
3569 * below the source directory. Also the user must have
3570 * write permission in the source so as to be able to
3573 if ((((sip
->i_mode
& IFMT
) == IFDIR
) ||
3574 ((sip
->i_mode
& IFMT
) == IFATTRDIR
)) && sdp
!= tdp
) {
3577 if (error
= ufs_iaccess(sip
, IWRITE
, cr
, 0)) {
3578 rw_exit(&sip
->i_contents
);
3579 rw_exit(&sdp
->i_contents
);
3582 inum
= sip
->i_number
;
3583 rw_exit(&sip
->i_contents
);
3584 rw_exit(&sdp
->i_contents
);
3585 if ((error
= ufs_dircheckpath(inum
, tdp
, sdp
, cr
))) {
3587 * If we got EAGAIN ufs_dircheckpath detected a
3588 * potential deadlock and backed out. We need
3589 * to retry the operation since sdp and tdp have
3590 * to be released to avoid the deadlock.
3592 if (error
== EAGAIN
) {
3593 rw_exit(&tdp
->i_rwlock
);
3595 rw_exit(&sdp
->i_rwlock
);
3596 delay(ufs_rename_backoff_delay
);
3597 ufs_rename_dircheck_retry_cnt
++;
3603 rw_exit(&sip
->i_contents
);
3604 rw_exit(&sdp
->i_contents
);
3609 * Check for renaming '.' or '..' or alias of '.'
3611 if (strcmp(snm
, ".") == 0 || strcmp(snm
, "..") == 0 || sdp
== sip
) {
3617 * Simultaneous renames can deadlock in ufs_dircheckpath since it
3618 * tries to traverse back the file tree with both tdp and sdp held
3619 * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
3620 * as RW_READERS till ufs_dircheckpath is done.
3621 * Now that ufs_dircheckpath is done with, we can upgrade the locks
3624 if (!rw_tryupgrade(&tdp
->i_rwlock
)) {
3626 * The upgrade failed. We got to give away the lock
3627 * as to avoid deadlocking with someone else who is
3628 * waiting for writer lock. With the lock gone, we
3629 * cannot be sure the checks done above will hold
3630 * good when we eventually get them back as writer.
3631 * So if we can't upgrade we drop the locks and retry
3634 rw_exit(&tdp
->i_rwlock
);
3636 rw_exit(&sdp
->i_rwlock
);
3637 delay(ufs_rename_backoff_delay
);
3638 ufs_rename_upgrade_retry_cnt
++;
3642 if (!rw_tryupgrade(&sdp
->i_rwlock
)) {
3644 * The upgrade failed. We got to give away the lock
3645 * as to avoid deadlocking with someone else who is
3646 * waiting for writer lock. With the lock gone, we
3647 * cannot be sure the checks done above will hold
3648 * good when we eventually get them back as writer.
3649 * So if we can't upgrade we drop the locks and retry
3652 rw_exit(&tdp
->i_rwlock
);
3653 rw_exit(&sdp
->i_rwlock
);
3654 delay(ufs_rename_backoff_delay
);
3655 ufs_rename_upgrade_retry_cnt
++;
3661 * Now that all the locks are held check to make sure another thread
3662 * didn't slip in and take out the sip.
3665 if ((sip
->i_ctime
.tv_usec
* 1000) > now
.tv_nsec
||
3666 sip
->i_ctime
.tv_sec
> now
.tv_sec
) {
3667 rw_enter(&sdp
->i_ufsvfs
->vfs_dqrwlock
, RW_READER
);
3668 rw_enter(&sdp
->i_contents
, RW_WRITER
);
3669 error
= ufs_dircheckforname(sdp
, snm
, strlen(snm
), &slot
,
3671 rw_exit(&sdp
->i_contents
);
3672 rw_exit(&sdp
->i_ufsvfs
->vfs_dqrwlock
);
3681 * If the inode was found need to drop the v_count
3682 * so as not to keep the filesystem from being
3683 * unmounted at a later time.
3689 * Release the slot.fbp that has the page mapped and
3690 * locked SE_SHARED, and could be used in in
3691 * ufs_direnter_lr() which needs to get the SE_EXCL lock
3695 fbrelse(slot
.fbp
, S_OTHER
);
3701 * Link source to the target.
3703 if (error
= ufs_direnter_lr(tdp
, tnm
, DE_RENAME
, sdp
, sip
, cr
)) {
3705 * ESAME isn't really an error; it indicates that the
3706 * operation should not be done because the source and target
3707 * are the same file, but that no error should be reported.
3714 if (error
== 0 && tvp
!= NULL
)
3715 vnevent_rename_dest(tvp
, tdvp
, tnm
, ct
);
3718 * Unlink the source.
3719 * Remove the source entry. ufs_dirremove() checks that the entry
3720 * still reflects sip, and returns an error if it doesn't.
3721 * If the entry has changed just forget about it. Release
3724 if ((error
= ufs_dirremove(sdp
, snm
, sip
, (struct vnode
*)0,
3725 DR_RENAME
, cr
)) == ENOENT
)
3729 vnevent_rename_src(ITOV(sip
), sdvp
, snm
, ct
);
3731 * Notify the target directory of the rename event
3732 * if source and target directories are not the same.
3735 vnevent_rename_dest_dir(tdvp
, ct
);
3740 fbrelse(slot
.fbp
, S_OTHER
);
3742 rw_exit(&tdp
->i_rwlock
);
3744 rw_exit(&sdp
->i_rwlock
);
3754 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_RENAME
, trans_size
);
3755 ufs_lockfs_end(ulp
);
3763 ufs_mkdir(struct vnode
*dvp
, char *dirname
, struct vattr
*vap
,
3764 struct vnode
**vpp
, struct cred
*cr
, caller_context_t
*ct
, int flags
,
3769 struct ufsvfs
*ufsvfsp
;
3770 struct ulockfs
*ulp
;
3777 ASSERT((vap
->va_mask
& (AT_TYPE
|AT_MODE
)) == (AT_TYPE
|AT_MODE
));
3780 * Can't make directory in attr hidden dir
3782 if ((VTOI(dvp
)->i_mode
& IFMT
) == IFATTRDIR
)
3787 ufsvfsp
= ip
->i_ufsvfs
;
3788 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_MKDIR_MASK
);
3792 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_MKDIR
,
3793 trans_size
= (int)TOP_MKDIR_SIZE(ip
));
3796 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3797 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
3798 * possible, retries the operation.
3800 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_MKDIR
, retry
);
3804 error
= ufs_direnter_cm(ip
, dirname
, DE_MKDIR
, vap
, &xip
, cr
,
3805 (retry
? IQUIET
: 0));
3806 if (error
== EAGAIN
) {
3808 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_MKDIR
,
3810 ufs_lockfs_end(ulp
);
3815 rw_exit(&ip
->i_rwlock
);
3819 } else if (error
== EEXIST
)
3824 TRANS_END_CSYNC(ufsvfsp
, terr
, issync
, TOP_MKDIR
, trans_size
);
3825 ufs_lockfs_end(ulp
);
3830 if ((error
== ENOSPC
) && retry
&& TRANS_ISTRANS(ufsvfsp
)) {
3831 ufs_delete_drain_wait(ufsvfsp
, 1);
3841 ufs_rmdir(struct vnode
*vp
, char *nm
, struct vnode
*cdir
, struct cred
*cr
,
3842 caller_context_t
*ct
, int flags
)
3844 struct inode
*ip
= VTOI(vp
);
3845 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
3846 struct ulockfs
*ulp
;
3847 vnode_t
*rmvp
= NULL
; /* Vnode of removed directory */
3854 * don't let the delete queue get too long
3856 if (ufsvfsp
== NULL
) {
3860 if (ufsvfsp
->vfs_delete
.uq_ne
> ufs_idle_max
)
3861 ufs_delete_drain(vp
->v_vfsp
, 1, 1);
3863 error
= ufs_eventlookup(vp
, nm
, cr
, &rmvp
);
3865 /* Only send the event if there were no errors */
3867 vnevent_rmdir(rmvp
, vp
, nm
, ct
);
3872 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_RMDIR_MASK
);
3877 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_RMDIR
,
3878 trans_size
= TOP_RMDIR_SIZE
);
3881 * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3882 * to avoid i_rwlock, ufs_lockfs_begin deadlock. If deadlock
3883 * possible, retries the operation.
3885 ufs_tryirwlock_trans(&ip
->i_rwlock
, RW_WRITER
, TOP_RMDIR
, retry
);
3888 error
= ufs_dirremove(ip
, nm
, (struct inode
*)0, cdir
, DR_RMDIR
, cr
);
3890 rw_exit(&ip
->i_rwlock
);
3893 TRANS_END_CSYNC(ufsvfsp
, error
, issync
, TOP_RMDIR
,
3895 ufs_lockfs_end(ulp
);
3909 caller_context_t
*ct
,
3915 struct dirent64
*odp
;
3917 struct ufsvfs
*ufsvfsp
;
3918 struct ulockfs
*ulp
;
3922 uint_t bytes_wanted
, total_bytes_wanted
;
3928 ASSERT(RW_READ_HELD(&ip
->i_rwlock
));
3930 if (uiop
->uio_loffset
>= MAXOFF32_T
) {
3937 * Check if we have been called with a valid iov_len
3938 * and bail out if not, otherwise we may potentially loop
3939 * forever further down.
3941 if (uiop
->uio_iov
->iov_len
<= 0) {
3947 * Large Files: When we come here we are guaranteed that
3948 * uio_offset can be used safely. The high word is zero.
3951 ufsvfsp
= ip
->i_ufsvfs
;
3952 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_READDIR_MASK
);
3956 iovp
= uiop
->uio_iov
;
3957 total_bytes_wanted
= iovp
->iov_len
;
3959 /* Large Files: directory files should not be "large" */
3961 ASSERT(ip
->i_size
<= MAXOFF32_T
);
3963 /* Force offset to be valid (to guard against bogus lseek() values) */
3964 offset
= (uint_t
)uiop
->uio_offset
& ~(DIRBLKSIZ
- 1);
3966 /* Quit if at end of file or link count of zero (posix) */
3967 if (offset
>= (uint_t
)ip
->i_size
|| ip
->i_nlink
<= 0) {
3975 * Get space to change directory entries into fs independent format.
3976 * Do fast alloc for the most commonly used-request size (filesystem
3979 if (uiop
->uio_segflg
!= UIO_SYSSPACE
|| uiop
->uio_iovcnt
!= 1) {
3980 bufsize
= total_bytes_wanted
;
3981 outbuf
= kmem_alloc(bufsize
, KM_SLEEP
);
3982 odp
= (struct dirent64
*)outbuf
;
3984 bufsize
= total_bytes_wanted
;
3985 odp
= (struct dirent64
*)iovp
->iov_base
;
3989 bytes_wanted
= total_bytes_wanted
;
3991 /* Truncate request to file size */
3992 if (offset
+ bytes_wanted
> (int)ip
->i_size
)
3993 bytes_wanted
= (int)(ip
->i_size
- offset
);
3995 /* Comply with MAXBSIZE boundary restrictions of fbread() */
3996 if ((offset
& MAXBOFFSET
) + bytes_wanted
> MAXBSIZE
)
3997 bytes_wanted
= MAXBSIZE
- (offset
& MAXBOFFSET
);
4000 * Read in the next chunk.
4001 * We are still holding the i_rwlock.
4003 error
= fbread(vp
, (offset_t
)offset
, bytes_wanted
, S_OTHER
, &fbp
);
4007 if (!ULOCKFS_IS_NOIACC(ITOUL(ip
)) && (ip
->i_fs
->fs_ronly
== 0) &&
4008 (!ufsvfsp
->vfs_noatime
)) {
4012 idp
= (struct direct
*)fbp
->fb_addr
;
4013 if (idp
->d_ino
== 0 && idp
->d_reclen
== 0 && idp
->d_namlen
== 0) {
4014 cmn_err(CE_WARN
, "ufs_readdir: bad dir, inumber = %llu, "
4016 (u_longlong_t
)ip
->i_number
, ufsvfsp
->vfs_fs
->fs_fsmnt
);
4017 fbrelse(fbp
, S_OTHER
);
4021 /* Transform to file-system independent format */
4022 while (incount
< bytes_wanted
) {
4024 * If the current directory entry is mangled, then skip
4025 * to the next block. It would be nice to set the FSBAD
4026 * flag in the super-block so that a fsck is forced on
4027 * next reboot, but locking is a problem.
4029 if (idp
->d_reclen
& 0x3) {
4030 offset
= (offset
+ DIRBLKSIZ
) & ~(DIRBLKSIZ
-1);
4034 /* Skip to requested offset and skip empty entries */
4035 if (idp
->d_ino
!= 0 && offset
>= (uint_t
)uiop
->uio_offset
) {
4036 ushort_t this_reclen
=
4037 DIRENT64_RECLEN(idp
->d_namlen
);
4038 /* Buffer too small for any entries */
4039 if (!outcount
&& this_reclen
> bufsize
) {
4040 fbrelse(fbp
, S_OTHER
);
4044 /* If would overrun the buffer, quit */
4045 if (outcount
+ this_reclen
> bufsize
) {
4048 /* Take this entry */
4049 odp
->d_ino
= (ino64_t
)idp
->d_ino
;
4050 odp
->d_reclen
= (ushort_t
)this_reclen
;
4051 odp
->d_off
= (offset_t
)(offset
+ idp
->d_reclen
);
4053 /* use strncpy(9f) to zero out uninitialized bytes */
4055 ASSERT(strlen(idp
->d_name
) + 1 <=
4056 DIRENT64_NAMELEN(this_reclen
));
4057 (void) strncpy(odp
->d_name
, idp
->d_name
,
4058 DIRENT64_NAMELEN(this_reclen
));
4059 outcount
+= odp
->d_reclen
;
4060 odp
= (struct dirent64
*)
4061 ((intptr_t)odp
+ odp
->d_reclen
);
4062 ASSERT(outcount
<= bufsize
);
4064 if (idp
->d_reclen
) {
4065 incount
+= idp
->d_reclen
;
4066 offset
+= idp
->d_reclen
;
4067 idp
= (struct direct
*)((intptr_t)idp
+ idp
->d_reclen
);
4069 offset
= (offset
+ DIRBLKSIZ
) & ~(DIRBLKSIZ
-1);
4073 /* Release the chunk */
4074 fbrelse(fbp
, S_OTHER
);
4076 /* Read whole block, but got no entries, read another if not eof */
4079 * Large Files: casting i_size to int here is not a problem
4080 * because directory sizes are always less than MAXOFF32_T.
4081 * See assertion above.
4084 if (offset
< (int)ip
->i_size
&& !outcount
)
4087 /* Copy out the entry data */
4088 if (uiop
->uio_segflg
== UIO_SYSSPACE
&& uiop
->uio_iovcnt
== 1) {
4089 iovp
->iov_base
+= outcount
;
4090 iovp
->iov_len
-= outcount
;
4091 uiop
->uio_resid
-= outcount
;
4092 uiop
->uio_offset
= offset
;
4093 } else if ((error
= uiomove(outbuf
, (long)outcount
, UIO_READ
,
4095 uiop
->uio_offset
= offset
;
4098 if (uiop
->uio_segflg
!= UIO_SYSSPACE
|| uiop
->uio_iovcnt
!= 1)
4099 kmem_free(outbuf
, bufsize
);
4101 if (eofp
&& error
== 0)
4102 *eofp
= (uiop
->uio_offset
>= (int)ip
->i_size
);
4105 ufs_lockfs_end(ulp
);
4114 struct vnode
*dvp
, /* ptr to parent dir vnode */
4115 char *linkname
, /* name of symbolic link */
4116 struct vattr
*vap
, /* attributes */
4117 char *target
, /* target path */
4118 struct cred
*cr
, /* user credentials */
4119 caller_context_t
*ct
,
4122 struct inode
*ip
, *dip
= VTOI(dvp
);
4123 struct ufsvfs
*ufsvfsp
= dip
->i_ufsvfs
;
4124 struct ulockfs
*ulp
;
4133 * No symlinks in attrdirs at this time
4135 if ((VTOI(dvp
)->i_mode
& IFMT
) == IFATTRDIR
)
4139 ip
= (struct inode
*)NULL
;
4140 vap
->va_type
= VLNK
;
4143 error
= ufs_lockfs_begin(ufsvfsp
, &ulp
, ULOCKFS_SYMLINK_MASK
);
4148 TRANS_BEGIN_CSYNC(ufsvfsp
, issync
, TOP_SYMLINK
,
4149 trans_size
= (int)TOP_SYMLINK_SIZE(dip
));
4152 * We must create the inode before the directory entry, to avoid
4153 * racing with readlink(). ufs_dirmakeinode requires that we
4154 * hold the quota lock as reader, and directory locks as writer.
4157 rw_enter(&dip
->i_rwlock
, RW_WRITER
);
4158 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
4159 rw_enter(&dip
->i_contents
, RW_WRITER
);
4162 * Suppress any out of inodes messages if we will retry on
4166 dip
->i_flag
|= IQUIET
;
4168 error
= ufs_dirmakeinode(dip
, &ip
, vap
, DE_SYMLINK
, cr
);
4170 dip
->i_flag
&= ~IQUIET
;
4172 rw_exit(&dip
->i_contents
);
4173 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4174 rw_exit(&dip
->i_rwlock
);
4180 * OK. The inode has been created. Write out the data of the
4181 * symbolic link. Since symbolic links are metadata, and should
4182 * remain consistent across a system crash, we need to force the
4183 * data out synchronously.
4185 * (This is a change from the semantics in earlier releases, which
4186 * only created symbolic links synchronously if the semi-documented
4187 * 'syncdir' option was set, or if we were being invoked by the NFS
4188 * server, which requires symbolic links to be created synchronously.)
4190 * We need to pass in a pointer for the residual length; otherwise
4191 * ufs_rdwri() will always return EIO if it can't write the data,
4192 * even if the error was really ENOSPC or EDQUOT.
4195 ioflag
= FWRITE
| FDSYNC
;
4198 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
4199 rw_enter(&ip
->i_contents
, RW_WRITER
);
4202 * Suppress file system full messages if we will retry
4205 ip
->i_flag
|= IQUIET
;
4207 error
= ufs_rdwri(UIO_WRITE
, ioflag
, ip
, target
, strlen(target
),
4208 (offset_t
)0, UIO_SYSSPACE
, &residual
, cr
);
4210 ip
->i_flag
&= ~IQUIET
;
4213 rw_exit(&ip
->i_contents
);
4214 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4219 * If the link's data is small enough, we can cache it in the inode.
4220 * This is a "fast symbolic link". We don't use the first direct
4221 * block because that's actually used to point at the symbolic link's
4222 * contents on disk; but we know that none of the other direct or
4223 * indirect blocks can be used because symbolic links are restricted
4224 * to be smaller than a file system block.
4227 ASSERT(MAXPATHLEN
<= VBSIZE(ITOV(ip
)));
4229 if (ip
->i_size
> 0 && ip
->i_size
<= FSL_SIZE
) {
4230 if (kcopy(target
, &ip
->i_db
[1], ip
->i_size
) == 0) {
4231 ip
->i_flag
|= IFASTSYMLNK
;
4234 /* error, clear garbage left behind */
4235 for (i
= 1; i
< NDADDR
; i
++)
4237 for (i
= 0; i
< NIADDR
; i
++)
4242 rw_exit(&ip
->i_contents
);
4243 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4246 * OK. We've successfully created the symbolic link. All that
4247 * remains is to insert it into the appropriate directory.
4250 rw_enter(&dip
->i_rwlock
, RW_WRITER
);
4251 error
= ufs_direnter_lr(dip
, linkname
, DE_SYMLINK
, NULL
, ip
, cr
);
4252 rw_exit(&dip
->i_rwlock
);
4255 * Fall through into remove-on-error code. We're either done, or we
4256 * need to remove the inode (if we couldn't insert it).
4260 if (error
&& (ip
!= NULL
)) {
4261 rw_enter(&ip
->i_contents
, RW_WRITER
);
4266 rw_exit(&ip
->i_contents
);
4276 TRANS_END_CSYNC(ufsvfsp
, terr
, issync
, TOP_SYMLINK
,
4278 ufs_lockfs_end(ulp
);
4284 * We may have failed due to lack of an inode or of a block to
4285 * store the target in. Try flushing the delete queue to free
4286 * logically-available things up and try again.
4288 if ((error
== ENOSPC
) && retry
&& TRANS_ISTRANS(ufsvfsp
)) {
4289 ufs_delete_drain_wait(ufsvfsp
, 1);
/*
 * Ufs specific routine used to do ufs io.
 */
int
ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
    ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,
    struct cred *cr)
{
	uio_t auio;
	iovec_t aiov;
	int error;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	bzero((caddr_t)&auio, sizeof (uio_t));
	bzero((caddr_t)&aiov, sizeof (iovec_t));

	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = offset;
	auio.uio_segflg = (short)seg;
	auio.uio_resid = len;

	if (rw == UIO_WRITE) {
		auio.uio_fmode = FWRITE;
		auio.uio_extflg = UIO_COPY_DEFAULT;
		auio.uio_llimit = curproc->p_fsz_ctl;
		error = wrip(ip, &auio, ioflag, cr);
	} else {
		auio.uio_fmode = FREAD;
		auio.uio_extflg = UIO_COPY_CACHED;
		auio.uio_llimit = MAXOFFSET_T;
		error = rdip(ip, &auio, ioflag, cr);
	}

	if (aresid) {
		*aresid = auio.uio_resid;
	} else if (auio.uio_resid) {
		error = EIO;
	}
	return (error);
}
/* ARGSUSED */
static int
ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
{
	struct ufid *ufid;
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
		fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);
		return (ENOSPC);
	}

	ufid = (struct ufid *)fidp;
	bzero((char *)ufid, sizeof (struct ufid));
	ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
	ufid->ufid_ino = ip->i_number;
	ufid->ufid_gen = ip->i_gen;

	return (0);
}
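/*
 * The file id built by ufs_fid() is what NFS embeds in file handles: the
 * inode number plus the generation count, so a handle referring to an inode
 * that was freed and reused is detected as stale when ufid_gen no longer
 * matches.  The advertised length excludes the fid_len field itself, hence
 * the "sizeof (struct ufid) - sizeof (ushort_t)" arithmetic.
 */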
/* ARGSUSED2 */
static int
ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	struct inode	*ip = VTOI(vp);
	struct ufsvfs	*ufsvfsp;
	int		forcedirectio;

	/*
	 * Read case is easy.
	 */
	if (!write_lock) {
		rw_enter(&ip->i_rwlock, RW_READER);
		return (V_WRITELOCK_FALSE);
	}

	/*
	 * Caller has requested a writer lock, but that inhibits any
	 * concurrency in the VOPs that follow.  Acquire the lock shared
	 * and defer exclusive access until it is known to be needed in
	 * other VOP handlers.  Some cases can be determined here.
	 */

	/*
	 * If directio is not set, there is no chance of concurrency,
	 * so just acquire the lock exclusive.  Beware of a forced
	 * unmount before looking at the mount option.
	 */
	ufsvfsp = ip->i_ufsvfs;
	forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
	if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
	    !ufs_allow_shared_writes) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}

	/*
	 * Mandatory locking forces acquiring i_rwlock exclusive.
	 */
	if (MANDLOCK(vp, ip->i_mode)) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}

	/*
	 * Acquire the lock shared in case a concurrent write follows.
	 * Mandatory locking could have become enabled before the lock
	 * was acquired.  Re-check and upgrade if needed.
	 */
	rw_enter(&ip->i_rwlock, RW_READER);
	if (MANDLOCK(vp, ip->i_mode)) {
		rw_exit(&ip->i_rwlock);
		rw_enter(&ip->i_rwlock, RW_WRITER);
		return (V_WRITELOCK_TRUE);
	}
	return (V_WRITELOCK_FALSE);
}

/*ARGSUSED*/
static void
ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
{
	struct inode	*ip = VTOI(vp);

	rw_exit(&ip->i_rwlock);
}
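/*
 * Decision summary for ufs_rwlock() with write_lock set, paraphrased from
 * the checks above:
 *
 *	directio off or shared writes disabled	-> RW_WRITER
 *	mandatory locking in effect		-> RW_WRITER
 *	otherwise				-> RW_READER, re-checked and
 *						   upgraded if MANDLOCK became
 *						   true in the meantime
 *
 * Returning V_WRITELOCK_FALSE tells the caller only a shared lock is held,
 * so the write path must provide its own exclusion where it needs it.
 */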
/* ARGSUSED */
static int
ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}

/* ARGSUSED */
static int
ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
    caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (ip->i_ufsvfs == NULL)
		return (EIO);

	/*
	 * If file is being mapped, disallow frlock.
	 * XXX I am not holding tlock while checking i_mapcnt because the
	 * current locking strategy drops all locks before calling fs_frlock.
	 * So, mapcnt could change before we enter fs_frlock, making it
	 * meaningless to have held tlock in the first place.
	 */
	if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))
		return (EAGAIN);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/* ARGSUSED */
static int
ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp;
	int error;

	if ((error = convoff(vp, bfp, 0, offset)) == 0) {
		if (cmd == F_FREESP) {
			error = ufs_lockfs_begin(ufsvfsp, &ulp,
			    ULOCKFS_SPACE_MASK);
			if (error)
				return (error);
			error = ufs_freesp(vp, bfp, flag, cr);

			if (error == 0 && bfp->l_start == 0)
				vnevent_truncate(vp, ct);
		} else if (cmd == F_ALLOCSP) {
			error = ufs_lockfs_begin(ufsvfsp, &ulp,
			    ULOCKFS_FALLOCATE_MASK);
			if (error)
				return (error);
			error = ufs_allocsp(vp, bfp, cr);
		} else
			return (EINVAL); /* Command not handled here */

		if (ulp)
			ufs_lockfs_end(ulp);
	}
	return (error);
}
/*
 * Used to determine if read ahead should be done.  Also used to
 * determine when write back occurs.
 */
#define	CLUSTSZ(ip)		((ip)->i_ufsvfs->vfs_ioclustsz)
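/*
 * CLUSTSZ() is the per-filesystem I/O cluster size.  As a rough sketch of
 * how it is used below, read ahead of the next cluster is considered when
 * the faulting offset is within one cluster of i_nextrio, i.e.
 *
 *	pgoff + CLUSTSZ(ip) >= ip->i_nextrio && pgoff <= ip->i_nextrio
 *
 * and the write-back policy elsewhere keys off the same value.
 */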
/*
 * A faster version of ufs_getpage.
 *
 * We optimize by inlining the pvn_getpages iterator, eliminating
 * calls to bmap_read if the file doesn't have UFS holes, and avoiding
 * the overhead of page_exists().
 *
 * When a file has UFS holes and ufs_getpage is called with S_READ,
 * we set *protp to PROT_READ to avoid calling bmap_read.  This approach
 * victimizes performance when a file with UFS holes is faulted
 * first in the S_READ mode, and then in the S_WRITE mode.  We will get
 * two MMU faults in this case.
 *
 * XXX - the inode fields which control the sequential mode are not
 *	 protected by any mutex.  The read ahead will act wild if
 *	 multiple processes access the file concurrently and
 *	 some of them in sequential mode.  One particularly bad case
 *	 is if another thread changes the value of i_nextrio between
 *	 the time this thread tests the i_nextrio value and then reads it
 *	 again to use it as the offset for the read ahead.
 */
4529 ufs_getpage(struct vnode
*vp
, offset_t off
, size_t len
, uint_t
*protp
,
4530 page_t
*plarr
[], size_t plsz
, struct seg
*seg
, caddr_t addr
,
4531 enum seg_rw rw
, struct cred
*cr
, caller_context_t
*ct
)
4533 u_offset_t uoff
= (u_offset_t
)off
; /* type conversion */
4536 struct inode
*ip
= VTOI(vp
);
4537 struct ufsvfs
*ufsvfsp
= ip
->i_ufsvfs
;
4539 struct ulockfs
*ulp
;
4547 int pgsize
= PAGESIZE
;
4552 ASSERT((uoff
& PAGEOFFSET
) == 0);
4558 * Obey the lockfs protocol
4560 err
= ufs_lockfs_begin_getpage(ufsvfsp
, &ulp
, seg
,
4561 rw
== S_READ
|| rw
== S_EXEC
, protp
);
4565 fs
= ufsvfsp
->vfs_fs
;
4567 if (ulp
&& (rw
== S_CREATE
|| rw
== S_WRITE
) &&
4568 !(vp
->v_flag
& VISSWAP
)) {
4570 * Try to start a transaction, will return if blocking is
4571 * expected to occur and the address space is not the
4572 * kernel address space.
4574 trans_size
= TOP_GETPAGE_SIZE(ip
);
4575 if (seg
->s_as
!= &kas
) {
4576 TRANS_TRY_BEGIN_ASYNC(ufsvfsp
, TOP_GETPAGE
,
4578 if (err
== EWOULDBLOCK
) {
4580 * Use EDEADLK here because the VM code
4581 * can normally never see this error.
4584 ufs_lockfs_end(ulp
);
4588 TRANS_BEGIN_ASYNC(ufsvfsp
, TOP_GETPAGE
, trans_size
);
4592 if (vp
->v_flag
& VNOMAP
) {
4597 seqmode
= ip
->i_nextr
== uoff
&& rw
!= S_CREATE
;
4599 rwtype
= RW_READER
; /* start as a reader */
4600 dolock
= (rw_owner(&ip
->i_contents
) != curthread
);
4602 * If this thread owns the lock, i.e., this thread grabbed it
4603 * as writer somewhere above, then we don't need to grab the
4604 * lock as reader in this routine.
4606 do_qlock
= (rw_owner(&ufsvfsp
->vfs_dqrwlock
) != curthread
);
4611 * Grab the quota lock if we need to call
4612 * bmap_write() below (with i_contents as writer).
4614 if (do_qlock
&& rwtype
== RW_WRITER
)
4615 rw_enter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
);
4616 rw_enter(&ip
->i_contents
, rwtype
);
4620 * We may be getting called as a side effect of a bmap using
4621 * fbread() when the blocks might be being allocated and the
4622 * size has not yet been up'ed. In this case we want to be
4623 * able to return zero pages if we get back UFS_HOLE from
4624 * calling bmap for a non write case here. We also might have
4625 * to read some frags from the disk into a page if we are
4626 * extending the number of frags for a given lbn in bmap().
4627 * Large Files: The read of i_size here is atomic because
4628 * i_contents is held here. If dolock is zero, the lock
4629 * is held in bmap routines.
4631 beyond_eof
= uoff
+ len
>
4632 P2ROUNDUP_TYPED(ip
->i_size
, PAGESIZE
, u_offset_t
);
4633 if (beyond_eof
&& seg
!= segkmap
) {
4635 rw_exit(&ip
->i_contents
);
4636 if (do_qlock
&& rwtype
== RW_WRITER
)
4637 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4644 * Must hold i_contents lock throughout the call to pvn_getpages
4645 * since locked pages are returned from each call to ufs_getapage.
4646 * Must *not* return locked pages and then try for contents lock
4647 * due to lock ordering requirements (inode > page)
4650 has_holes
= bmap_has_holes(ip
);
4652 if ((rw
== S_WRITE
|| rw
== S_CREATE
) && has_holes
&& !beyond_eof
) {
4657 * We must acquire the RW_WRITER lock in order to
4658 * call bmap_write().
4660 if (dolock
&& rwtype
== RW_READER
) {
4664 * Grab the quota lock before
4665 * upgrading i_contents, but if we can't grab it
4666 * don't wait here due to lock order:
4667 * vfs_dqrwlock > i_contents.
4670 rw_tryenter(&ufsvfsp
->vfs_dqrwlock
, RW_READER
)
4672 rw_exit(&ip
->i_contents
);
4675 if (!rw_tryupgrade(&ip
->i_contents
)) {
4676 rw_exit(&ip
->i_contents
);
4678 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4684 * May be allocating disk blocks for holes here as
4685 * a result of mmap faults. write(2) does the bmap_write
4686 * in rdip/wrip, not here. We are not dealing with frags
4690 * Large Files: We cast fs_bmask field to offset_t
4691 * just as we do for MAXBMASK because uoff is a 64-bit
4692 * data type. fs_bmask will still be a 32-bit type
4693 * as we cannot change any ondisk data structures.
4696 offset
= uoff
& (offset_t
)fs
->fs_bmask
;
4697 while (offset
< uoff
+ len
) {
4698 blk_size
= (int)blksize(fs
, ip
, lblkno(fs
, offset
));
4699 err
= bmap_write(ip
, offset
, blk_size
,
4700 BI_NORMAL
, NULL
, cr
);
4701 if (ip
->i_flag
& (ICHG
|IUPD
))
4705 offset
+= blk_size
; /* XXX - make this contig */
4710 * Can be a reader from now on.
4712 if (dolock
&& rwtype
== RW_WRITER
) {
4713 rw_downgrade(&ip
->i_contents
);
4715 * We can release vfs_dqrwlock early so do it, but make
4716 * sure we don't try to release it again at the bottom.
4719 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4725 * We remove PROT_WRITE in cases when the file has UFS holes
4726 * because we don't want to call bmap_read() to check each
4727 * page if it is backed with a disk block.
4729 if (protp
&& has_holes
&& rw
!= S_WRITE
&& rw
!= S_CREATE
)
4730 *protp
&= ~PROT_WRITE
;
4735 * The loop looks up pages in the range [off, off + len).
4736 * For each page, we first check if we should initiate an asynchronous
4737 * read ahead before we call page_lookup (we may sleep in page_lookup
4738 * for a previously initiated disk read).
4740 eoff
= (uoff
+ len
);
4741 for (pgoff
= uoff
, pgaddr
= addr
, pl
= plarr
;
4742 pgoff
< eoff
; /* empty */) {
4748 se
= ((rw
== S_CREATE
|| rw
== S_OTHER
) ? SE_EXCL
: SE_SHARED
);
4750 /* Handle async getpage (faultahead) */
4751 if (plarr
== NULL
) {
4752 ip
->i_nextrio
= pgoff
;
4753 (void) ufs_getpage_ra(vp
, pgoff
, seg
, pgaddr
);
4759 * Check if we should initiate read ahead of next cluster.
4760 * We call page_exists only when we need to confirm that
4761 * we have the current page before we initiate the read ahead.
4763 nextrio
= ip
->i_nextrio
;
4765 pgoff
+ CLUSTSZ(ip
) >= nextrio
&& pgoff
<= nextrio
&&
4766 nextrio
< ip
->i_size
&& page_exists(vp
, pgoff
)) {
4767 retval
= ufs_getpage_ra(vp
, pgoff
, seg
, pgaddr
);
4769 * We always read ahead the next cluster of data
4770 * starting from i_nextrio. If the page (vp,nextrio)
4771 * is actually in core at this point, the routine
4772 * ufs_getpage_ra() will stop pre-fetching data
4773 * until we read that page in a synchronized manner
4774 * through ufs_getpage_miss(). So, we should increase
4775 * i_nextrio if the page (vp, nextrio) exists.
4777 if ((retval
== 0) && page_exists(vp
, nextrio
)) {
4778 ip
->i_nextrio
= nextrio
+ pgsize
;
4782 if ((pp
= page_lookup(vp
, pgoff
, se
)) != NULL
) {
4784 * We found the page in the page cache.
4793 * We have to create the page, or read it from disk.
4795 if (err
= ufs_getpage_miss(vp
, pgoff
, len
, seg
, pgaddr
,
4796 pl
, plsz
, rw
, seqmode
))
4799 while (*pl
!= NULL
) {
4810 * Return pages up to plsz if they are in the page cache.
4811 * We cannot return pages if there is a chance that they are
4812 * backed with a UFS hole and rw is S_WRITE or S_CREATE.
4814 if (plarr
&& !(has_holes
&& (rw
== S_WRITE
|| rw
== S_CREATE
))) {
4816 ASSERT((protp
== NULL
) ||
4817 !(has_holes
&& (*protp
& PROT_WRITE
)));
4819 eoff
= pgoff
+ plsz
;
4820 while (pgoff
< eoff
) {
4823 if ((pp
= page_lookup_nowait(vp
, pgoff
,
4824 SE_SHARED
)) == NULL
)
4834 *pl
= NULL
; /* Terminate page list */
4835 ip
->i_nextr
= pgoff
;
4840 * Release any pages we have locked.
4842 while (pl
> &plarr
[0])
4850 * If the inode is not already marked for IACC (in rdip() for read)
4851 * and the inode is not marked for no access time update (in wrip()
4852 * for write) then update the inode access time and mod time now.
4854 if ((ip
->i_flag
& (IACC
| INOACC
)) == 0) {
4855 if ((rw
!= S_OTHER
) && (ip
->i_mode
& IFMT
) != IFDIR
) {
4856 if (!ULOCKFS_IS_NOIACC(ITOUL(ip
)) &&
4857 (fs
->fs_ronly
== 0) &&
4858 (!ufsvfsp
->vfs_noatime
)) {
4859 mutex_enter(&ip
->i_tlock
);
4862 mutex_exit(&ip
->i_tlock
);
4868 rw_exit(&ip
->i_contents
);
4869 if (do_qlock
&& rwtype
== RW_WRITER
)
4870 rw_exit(&ufsvfsp
->vfs_dqrwlock
);
4875 if ((rw
== S_CREATE
|| rw
== S_WRITE
) &&
4876 !(vp
->v_flag
& VISSWAP
)) {
4877 TRANS_END_ASYNC(ufsvfsp
, TOP_GETPAGE
, trans_size
);
4879 ufs_lockfs_end(ulp
);
/*
 * ufs_getpage_miss is called when ufs_getpage missed the page in the page
 * cache. The page is either read from the disk, or it's created.
 * A page is created (without disk read) if rw == S_CREATE, or if
 * the page is not backed with a real disk block (UFS hole).
 */
static int
ufs_getpage_miss(struct vnode *vp, u_offset_t off, size_t len, struct seg *seg,
	caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw, int seq)
{
	struct inode	*ip = VTOI(vp);
	page_t		*pp;
	struct buf	*bp;
	daddr_t		bn;
	u_offset_t	io_off;
	size_t		io_len;
	int		crpage = 0;
	int		err;
	int		contig = 0;
	int		xlen;
	int		bsize = ip->i_fs->fs_bsize;

	/*
	 * Figure out whether the page can be created, or must be
	 * read from the disk.
	 */
	if (rw == S_CREATE)
		crpage = 1;
	else {
		if (err = bmap_read(ip, off, &bn, &contig))
			return (err);

		crpage = (bn == UFS_HOLE);

		/*
		 * If its also a fallocated block that hasn't been written to
		 * yet, we will treat it just like a UFS_HOLE and create
		 * a zero page for it
		 */
		if (ISFALLOCBLK(ip, bn))
			crpage = 1;
	}

	if (crpage) {
		if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg,
		    addr)) == NULL) {
			return (ufs_fault(vp,
			    "ufs_getpage_miss: page_create == NULL"));
		}

		if (rw != S_CREATE)
			pagezero(pp, 0, PAGESIZE);

		io_len = PAGESIZE;
	} else {
		ufsvfs_t *ufsvfsp = ip->i_ufsvfs;

		/*
		 * If access is not in sequential order, we read from disk
		 * in bsize units.
		 *
		 * We limit the size of the transfer to bsize if we are reading
		 * from the beginning of the file. Note in this situation we
		 * will hedge our bets and initiate an async read ahead of
		 * the second block.
		 */
		if (!seq || off == 0)
			contig = MIN(contig, bsize);

		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, contig, 0);

		/*
		 * Some other thread has entered the page.
		 * ufs_getpage will retry page_lookup.
		 */
		if (pp == NULL)
			return (0);

		/*
		 * Zero part of the page which we are not
		 * going to read from the disk.
		 */
		xlen = io_len & PAGEOFFSET;
		if (xlen != 0)
			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

		bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;

		if (ufsvfsp->vfs_log) {
			lufs_read_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_getpages.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_INBLK, 1);
		}

		ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);

		/*
		 * If the file access is sequential, initiate read ahead
		 * of the next cluster.
		 */
		if (seq && ip->i_nextrio < ip->i_size)
			(void) ufs_getpage_ra(vp, off, seg, addr);

		err = biowait(bp);
		pageio_done(bp);

		if (err) {
			pvn_read_done(pp, B_ERROR);
			return (err);
		}
	}

	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	return (0);
}
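/*
 * Note that i_nextrio is advanced to the page aligned end of the transfer
 * just issued: (io_len + PAGESIZE - 1) & PAGEMASK rounds the I/O length up
 * to a whole page, so the next read ahead begins on a page boundary
 * immediately after the data read here.
 */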
/*
 * Read ahead a cluster from the disk. Returns the length in bytes.
 */
static int
ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
{
	struct inode	*ip = VTOI(vp);
	page_t		*pp;
	u_offset_t	io_off = ip->i_nextrio;
	ufsvfs_t	*ufsvfsp;
	caddr_t		addr2 = addr + (io_off - off);
	struct buf	*bp;
	daddr_t		bn;
	size_t		io_len;
	int		err;
	int		contig = 0;
	int		xlen;
	int		bsize = ip->i_fs->fs_bsize;

	/*
	 * If the directio advisory is in effect on this file,
	 * then do not do buffered read ahead. Read ahead makes
	 * it more difficult on threads using directio as they
	 * will be forced to flush the pages from this vnode.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (0);
	if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
		return (0);

	/*
	 * Is this test needed?
	 */
	if (addr2 >= seg->s_base + seg->s_size)
		return (0);

	err = bmap_read(ip, io_off, &bn, &contig);

	/*
	 * If its a UFS_HOLE or a fallocated block, do not perform
	 * any read ahead's since there probably is nothing to read ahead
	 */
	if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
		return (0);

	/*
	 * Limit the transfer size to bsize if this is the 2nd block.
	 */
	if (io_off == (u_offset_t)bsize)
		contig = MIN(contig, bsize);

	if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
	    &io_len, io_off, contig, 1)) == NULL)
		return (0);

	/*
	 * Zero part of page which we are not going to read from disk
	 */
	if ((xlen = (io_len & PAGEOFFSET)) > 0)
		pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

	ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;

	bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;

	if (ufsvfsp->vfs_log) {
		lufs_read_strategy(ufsvfsp->vfs_log, bp);
	} else if (ufsvfsp->vfs_snapshot) {
		fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
	} else {
		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_getras.value.ul++;
		(void) bdev_strategy(bp);
		lwp_stat_update(LWP_STAT_INBLK, 1);
	}

	return (io_len);
}
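/*
 * The read ahead buffer is issued B_READ | B_ASYNC, so ufs_getpage_ra()
 * returns without waiting for the transfer; the prefetched pages are picked
 * up by a later page_lookup() in ufs_getpage() once the I/O completes.
 */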
/*
 * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
 *
 * LMXXX - the inode really ought to contain a pointer to one of these
 * async args.  Stuff gunk in there and just hand the whole mess off.
 * This would replace i_delaylen, i_delayoff.
 */
static int
ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	int err = 0;

	if (vp->v_count == 0) {
		return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
	}

	/*
	 * XXX - Why should this check be made here?
	 */
	if (vp->v_flag & VNOMAP) {
		err = ENOSYS;
		goto errout;
	}

	if (ip->i_ufsvfs == NULL) {
		err = EIO;
		goto errout;
	}

	if (flags & B_ASYNC) {
		if (ufs_delay && len &&
		    (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
			mutex_enter(&ip->i_tlock);

			/*
			 * If nobody stalled, start a new cluster.
			 */
			if (ip->i_delaylen == 0) {
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				goto errout;
			}

			/*
			 * If we have a full cluster or they are not contig,
			 * then push last cluster and start over.
			 */
			if (ip->i_delaylen >= CLUSTSZ(ip) ||
			    ip->i_delayoff + ip->i_delaylen != off) {
				u_offset_t doff;
				size_t dlen;

				doff = ip->i_delayoff;
				dlen = ip->i_delaylen;
				ip->i_delayoff = off;
				ip->i_delaylen = len;
				mutex_exit(&ip->i_tlock);
				err = ufs_putpages(vp, doff, dlen,
				    flags, cr);
				/* LMXXX - flags are new val, not old */
				goto errout;
			}

			/*
			 * There is something there, it's not full, and
			 * it is contig.
			 */
			ip->i_delaylen += len;
			mutex_exit(&ip->i_tlock);
			goto errout;
		}

		/*
		 * Must have weird flags or we are not clustering.
		 */
	}

	err = ufs_putpages(vp, off, len, flags, cr);

errout:
	return (err);
}
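/*
 * For B_ASYNC requests the code above batches work instead of pushing each
 * page immediately: contiguous requests accumulate in i_delayoff/i_delaylen
 * until the pending range reaches CLUSTSZ(ip) or a non-contiguous offset
 * arrives, and only then is the previously accumulated range pushed out
 * through ufs_putpages() while the new request starts the next cluster.
 */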
/*
 * If len == 0, do from off to EOF.
 *
 * The normal cases should be len == 0 & off == 0 (entire vp list),
 * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
 * (from pageout).
 */
static int
ufs_putpages(struct vnode *vp, offset_t off, size_t len, int flags,
	struct cred *cr)
{
	struct inode	*ip = VTOI(vp);
	u_offset_t	io_off;
	u_offset_t	eoff;
	page_t		*pp;
	size_t		io_len;
	int		err = 0;
	int		dolock;

	if (vp->v_count == 0)
		return (ufs_fault(vp, "ufs_putpages: v_count == 0"));

	/*
	 * Acquire the readers/write inode lock before locking
	 * any pages in this inode.
	 * The inode lock is held during i/o.
	 */
	if (len == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_delayoff = ip->i_delaylen = 0;
		mutex_exit(&ip->i_tlock);
	}
	dolock = (rw_owner(&ip->i_contents) != curthread);
	if (dolock) {
		/*
		 * Must synchronize this thread and any possible thread
		 * operating in the window of vulnerability in wrip().
		 * It is dangerous to allow both a thread doing a putpage
		 * and a thread writing, so serialize them.  The exception
		 * is when the thread in wrip() does something which causes
		 * a putpage operation.  Then, the thread must be allowed
		 * to continue.  It may encounter a bmap_read problem in
		 * ufs_putapage, but that is handled in ufs_putapage.
		 * Allow async writers to proceed, we don't want to block
		 * the pageout daemon.
		 */
		if (ip->i_writer == curthread)
			rw_enter(&ip->i_contents, RW_READER);
		else {
			for (;;) {
				rw_enter(&ip->i_contents, RW_READER);
				mutex_enter(&ip->i_tlock);
				/*
				 * If there is no thread in the critical
				 * section of wrip(), then proceed.
				 * Otherwise, wait until there isn't one.
				 */
				if (ip->i_writer == NULL) {
					mutex_exit(&ip->i_tlock);
					break;
				}
				rw_exit(&ip->i_contents);
				/*
				 * Bounce async writers when we have a writer
				 * working on this file so we don't deadlock
				 * the pageout daemon.
				 */
				if (flags & B_ASYNC) {
					mutex_exit(&ip->i_tlock);
					return (0);
				}
				cv_wait(&ip->i_wrcv, &ip->i_tlock);
				mutex_exit(&ip->i_tlock);
			}
		}
	}

	if (!vn_has_cached_data(vp)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		return (0);
	}

	if (len == 0) {
		/*
		 * Search the entire vp list for pages >= off.
		 */
		err = pvn_vplist_dirty(vp, (u_offset_t)off, ufs_putapage,
		    flags, cr);
	} else {
		/*
		 * Loop over all offsets in the range looking for
		 * pages to deal with.
		 */
		if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
			eoff = MIN(off + len, eoff);
		else
			eoff = off + len;

		for (io_off = off; io_off < eoff; io_off += io_len) {
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages, use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(vp, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
				io_len = PAGESIZE;
			else {
				u_offset_t *io_offp = &io_off;

				err = ufs_putapage(vp, pp, io_offp, &io_len,
				    flags, cr);
				if (err != 0)
					break;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
		}
	}
	if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
		/*
		 * We have just sync'ed back all the pages on
		 * the inode, turn off the IMODTIME flag.
		 */
		mutex_enter(&ip->i_tlock);
		ip->i_flag &= ~IMODTIME;
		mutex_exit(&ip->i_tlock);
	}
	if (dolock)
		rw_exit(&ip->i_contents);
	return (err);
}
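/*
 * ufs_putpage() funnels both its clustered and direct cases through this
 * routine: a len of 0 writes back every cached page at or beyond off via
 * pvn_vplist_dirty(), while a bounded range is walked with page_lookup()
 * and ufs_putapage(), skipping ahead by however many pages each kluster
 * actually wrote.
 */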
static int
ufs_iodone(buf_t *bp)
{
	struct inode *ip;

	ASSERT((bp->b_pages->p_vnode != NULL) && !(bp->b_flags & B_READ));

	bp->b_iodone = NULL;

	ip = VTOI(bp->b_pages->p_vnode);

	mutex_enter(&ip->i_tlock);
	if (ip->i_writes >= ufs_LW) {
		if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
			cv_broadcast(&ip->i_wrcv); /* wake all up */
	} else {
		ip->i_writes -= bp->b_bcount;
	}
	mutex_exit(&ip->i_tlock);

	return (0);
}
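/*
 * ufs_putapage() charges each write against ip->i_writes before issuing it
 * (see the "write throttle" section below); ufs_iodone() is the matching
 * credit on completion, waking threads waiting on i_wrcv once the
 * outstanding byte count drops back to the ufs_LW threshold.
 */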
/*
 * Write out a single page, possibly klustering adjacent
 * dirty pages.  The inode lock must be held.
 *
 * LMXXX - bsize < pagesize not done.
 */
int
ufs_putapage(
	struct vnode *vp,
	page_t *pp,
	u_offset_t *offp,
	size_t *lenp,		/* return values */
	int flags,
	struct cred *cr)
{
	u_offset_t	io_off;
	u_offset_t	off;
	struct inode	*ip = VTOI(vp);
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	struct buf	*bp;
	size_t		io_len;
	daddr_t		bn;
	int		err;
	int		contig;
	int		dotrans;

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	if (ufsvfsp == NULL) {
		err = EIO;
		goto out;
	}

	fs = ip->i_fs;
	ASSERT(fs->fs_ronly == 0);

	/*
	 * If the modified time on the inode has not already been
	 * set elsewhere (e.g. for write/setattr) we set the time now.
	 * This gives us approximate modified times for mmap'ed files
	 * which are modified via stores in the user address space.
	 */
	if ((ip->i_flag & IMODTIME) == 0) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IUPD;
		ip->i_seq++;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	/*
	 * Align the request to a block boundry (for old file systems),
	 * and go ask bmap() how contiguous things are for this file.
	 */
	off = pp->p_offset & (offset_t)fs->fs_bmask;	/* block align it */
	contig = 0;
	err = bmap_read(ip, off, &bn, &contig);
	if (err)
		goto out;
	if (bn == UFS_HOLE) {			/* putpage never allocates */
		/*
		 * logging device is in error mode; simply return EIO
		 */
		if (TRANS_ISERROR(ufsvfsp)) {
			err = EIO;
			goto out;
		}
		/*
		 * Oops, the thread in the window in wrip() did some
		 * sort of operation which caused a putpage in the bad
		 * range.  In this case, just return an error which will
		 * cause the software modified bit on the page to set
		 * and the page will get written out again later.
		 */
		if (ip->i_writer == curthread) {
			err = EIO;
			goto out;
		}
		/*
		 * If the pager is trying to push a page in the bad range
		 * just tell it to try again later when things are better.
		 */
		if (flags & B_ASYNC) {
			err = EAGAIN;
			goto out;
		}
		err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
		goto out;
	}

	/*
	 * If it is an fallocate'd block, reverse the negativity since
	 * we are now writing to it
	 */
	if (ISFALLOCBLK(ip, bn)) {
		err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
		if (err)
			goto out;

		bn = -bn;
	}

	/*
	 * Take the length (of contiguous bytes) passed back from bmap()
	 * and _try_ and get a set of pages covering that extent.
	 */
	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);

	/*
	 * May have run out of memory and not clustered backwards.
	 * We told bmap off, so we have to adjust the bn accordingly.
	 */
	bn += btod(io_off - off);
	contig -= (io_off - off);

	/*
	 * bmap was carefull to tell us the right size so use that.
	 * There might be unallocated frags at the end.
	 * LMXXX - bzero the end of the page?  We must be writing after EOF.
	 */
	if (io_len > contig) {
		ASSERT(io_len - contig < fs->fs_bsize);
		io_len -= (io_len - contig);
	}

	/*
	 * Handle the case where we are writing the last page after EOF.
	 *
	 * XXX - just a patch for i-mt3.
	 */
	if (io_len == 0) {
		ASSERT(pp->p_offset >=
		    (u_offset_t)(roundup(ip->i_size, PAGESIZE)));
		io_len = PAGESIZE;
	}

	bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);

	ULOCKFS_SET_MOD(ITOUL(ip));

	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bn;
	bp->b_un.b_addr = (caddr_t)0;
	bp->b_file = ip->i_vnode;

	/*
	 * File contents of shadow or quota inodes are metadata, and updates
	 * to these need to be put into a logging transaction. All direct
	 * callers in UFS do that, but fsflush can come here _before_ the
	 * normal codepath. An example would be updating ACL information, for
	 * which the normal codepath would be:
	 *	ufs_si_store()
	 *	ufs_rdwri()
	 *	wrip()
	 *	segmap_release()
	 *	VOP_PUTPAGE()
	 * Here, fsflush can pick up the dirty page before segmap_release()
	 * forces it out. If that happens, there's no transaction.
	 * We therefore need to test whether a transaction exists, and if not
	 * create one - for fsflush.
	 */
	dotrans =
	    (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
	    ((curthread->t_flag & T_DONTBLOCK) == 0) &&
	    (TRANS_ISTRANS(ufsvfsp)));

	if (dotrans) {
		curthread->t_flag |= T_DONTBLOCK;
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
	}
	if (TRANS_ISTRANS(ufsvfsp)) {
		if ((ip->i_mode & IFMT) == IFSHAD) {
			TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
		} else if (ufsvfsp->vfs_qinod == ip) {
			TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
			    0, 0);
		}
	}
	if (dotrans) {
		TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
		curthread->t_flag &= ~T_DONTBLOCK;
	}

	/* write throttle */

	ASSERT(bp->b_iodone == NULL);
	bp->b_iodone = (int (*)())ufs_iodone;
	mutex_enter(&ip->i_tlock);
	ip->i_writes += bp->b_bcount;
	mutex_exit(&ip->i_tlock);

	if (bp->b_flags & B_ASYNC) {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putasyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}
	} else {
		if (ufsvfsp->vfs_log) {
			lufs_write_strategy(ufsvfsp->vfs_log, bp);
		} else if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
			ub.ub_putsyncs.value.ul++;
			(void) bdev_strategy(bp);
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		}

		err = biowait(bp);
		pageio_done(bp);

		pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
	}

	pp = NULL;

out:
	if (err != 0 && pp != NULL)
		pvn_write_done(pp, B_ERROR | B_WRITE | flags);

	if (offp)
		*offp = io_off;
	if (lenp)
		*lenp = io_len;
	return (err);
}
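/*
 * The async and sync submission paths above differ only in the statistics
 * bucket (ub_putasyncs vs. ub_putsyncs) and in that the synchronous path
 * biowait()s for the transfer and calls pvn_write_done() itself.
 */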
uint64_t ufs_map_alock_retry_cnt;
uint64_t ufs_map_lockfs_retry_cnt;

static int
ufs_map(struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t *addrp,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cr,
	caller_context_t *ct)
{
	struct segvn_crargs vn_a;
	struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs *ulp;
	int error, sig;
	caddr_t hint = *addrp;

	if (vp->v_flag & VNOMAP) {
		error = ENOSYS;
		goto out;
	}

	if (off < (offset_t)0 || (offset_t)(off + len) < (offset_t)0) {
		error = ENXIO;
		goto out;
	}

	if (vp->v_type != VREG) {
		error = ENODEV;
		goto out;
	}

retry_map:
	*addrp = hint;

	/*
	 * If file is being locked, disallow mapping.
	 */
	if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
		error = EAGAIN;
		goto out;
	}

	as_rangelock(as);
	/*
	 * Note that if we are retrying (because ufs_lockfs_trybegin failed in
	 * the previous attempt), some other thread could have grabbed
	 * the same VA range if MAP_FIXED is set. In that case, choose_addr
	 * would unmap the valid VA range, that is ok.
	 */
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto out;
	}

	/*
	 * a_lock has to be acquired before entering the lockfs protocol
	 * because that is the order in which pagefault works. Also we cannot
	 * block on a_lock here because this waiting writer will prevent
	 * further readers like ufs_read from progressing and could cause
	 * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
	 * pending.
	 */
	while (!AS_LOCK_TRYENTER(as, RW_WRITER)) {
		ufs_map_alock_retry_cnt++;
		delay(RETRY_LOCK_DELAY);
	}

	/*
	 * We can't hold as->a_lock and wait for lockfs to succeed because
	 * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
	 * instead.
	 */
	if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
		/*
		 * ufs_lockfs_trybegin() did not succeed. It is safer to give
		 * up as->a_lock and wait for ulp->ul_fs_lock status to change.
		 */
		ufs_map_lockfs_retry_cnt++;
		AS_LOCK_EXIT(as);
		as_rangeunlock(as);

		mutex_enter(&ulp->ul_lock);
		while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
			if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
				cv_wait(&ulp->ul_cv, &ulp->ul_lock);
			} else {
				sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);

				if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
				    !sig) || ufsvfsp->vfs_dontblock) {
					mutex_exit(&ulp->ul_lock);
					error = EINTR;
					goto out;
				}
			}
		}
		mutex_exit(&ulp->ul_lock);
		goto retry_map;
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
	if (ulp)
		ufs_lockfs_end(ulp);
	as_rangeunlock(as);
out:
	return (error);
}
static int
ufs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt += btopr(len);
	mutex_exit(&ip->i_tlock);

	return (0);
}
static int
ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
	struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	mutex_enter(&ip->i_tlock);
	ip->i_mapcnt -= btopr(len);	/* Count released mappings */
	ASSERT(ip->i_mapcnt >= 0);
	mutex_exit(&ip->i_tlock);

	return (0);
}
/*
 * Return the answer requested to poll() for non-device files
 */
struct pollhead ufs_pollhd;

static int
ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
	caller_context_t *ct)
{
	struct ufsvfs	*ufsvfsp;

	*revp = 0;
	ufsvfsp = VTOI(vp)->i_ufsvfs;

	if (ufsvfsp == NULL) {
		*revp = POLLHUP;
		goto out;
	}

	if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
	    ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
		*revp |= POLLERR;
		goto out;
	}

	if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
	    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
		*revp |= POLLOUT;

	if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
	    !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
		*revp |= POLLWRBAND;

	if (ev & POLLRDNORM)
		*revp |= POLLRDNORM;

	if (ev & POLLRDBAND)
		*revp |= POLLRDBAND;

	if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
		*revp |= POLLPRI;
out:
	*phpp = !any && !*revp ? &ufs_pollhd : (struct pollhead *)NULL;

	return (0);
}
static int
ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
	caller_context_t *ct)
{
	struct ufsvfs	*ufsvfsp = VTOI(vp)->i_ufsvfs;
	struct ulockfs	*ulp = NULL;
	struct inode	*sip = NULL;
	int		error;
	struct inode	*ip = VTOI(vp);
	int		issync;

	error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
	if (error)
		return (error);

	switch (cmd) {
	/*
	 * Have to handle _PC_NAME_MAX here, because the normal way
	 * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
	 * results in a lock ordering reversal between
	 * ufs_lockfs_{begin,end}() and
	 * ufs_thread_{suspend,continue}().
	 *
	 * Keep in sync with ufs_statvfs().
	 */
	case _PC_NAME_MAX:
		*valp = MAXNAMLEN;
		break;

	case _PC_FILESIZEBITS:
		if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
			*valp = UFS_FILESIZE_BITS;
		else
			*valp = 32;
		break;

	case _PC_XATTR_EXISTS:
		if (vp->v_vfsp->vfs_flag & VFS_XATTR) {

			error =
			    ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
			if (error == 0 && sip != NULL) {
				/* Start transaction */
				if (ulp) {
					TRANS_BEGIN_CSYNC(ufsvfsp, issync,
					    TOP_RMDIR, TOP_RMDIR_SIZE);
				}
				/*
				 * Is directory empty
				 */
				rw_enter(&sip->i_rwlock, RW_WRITER);
				rw_enter(&sip->i_contents, RW_WRITER);
				if (ufs_xattrdirempty(sip,
				    sip->i_number, CRED())) {
					rw_enter(&ip->i_contents, RW_WRITER);
					ufs_unhook_shadow(ip, sip);
					rw_exit(&ip->i_contents);

					*valp = 0;
				} else
					*valp = 1;

				rw_exit(&sip->i_contents);
				rw_exit(&sip->i_rwlock);
				if (ulp) {
					TRANS_END_CSYNC(ufsvfsp, error, issync,
					    TOP_RMDIR, TOP_RMDIR_SIZE);
				}
				VN_RELE(ITOV(sip));
			} else if (error == ENOENT) {
				*valp = 0;
				error = 0;
			}
		} else {
			error = fs_pathconf(vp, cmd, valp, cr, ct);
		}
		break;

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACLENT_ENABLED;
		break;

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)ip->i_fs->fs_bsize;
		break;

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		break;

	case _PC_TIMESTAMP_RESOLUTION:
		/*
		 * UFS keeps only microsecond timestamp resolution.
		 * This is historical and will probably never change.
		 */
		*valp = 1000L;
		break;

	default:
		error = fs_pathconf(vp, cmd, valp, cr, ct);
		break;
	}

	if (ulp != NULL) {
		ufs_lockfs_end(ulp);
	}
	return (error);
}
int ufs_pageio_writes, ufs_pageio_reads;

static int
ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, struct cred *cr, caller_context_t *ct)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp;
	page_t *npp = NULL, *opp = NULL, *cpp = pp;
	struct buf *bp;
	daddr_t bn;
	size_t done_len = 0, cur_len = 0;
	int err = 0;
	int contig = 0;
	int dolock;
	int vmpss = 0;
	struct ulockfs *ulp;

	if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
	    vp->v_mpssdata != NULL) {
		vmpss = 1;
	}

	dolock = (rw_owner(&ip->i_contents) != curthread);
	/*
	 * We need a better check.  Ideally, we would use another
	 * vnodeops so that hlocked and forcibly unmounted file
	 * systems would return EIO where appropriate and w/o the
	 * need for these checks.
	 */
	if ((ufsvfsp = ip->i_ufsvfs) == NULL)
		return (EIO);

	/*
	 * For vmpss (pp can be NULL) case respect the quiesce protocol.
	 * ul_lock must be taken before locking pages so we can't use it here
	 * if pp is non NULL because segvn already locked pages
	 * SE_EXCL. Instead we rely on the fact that a forced umount or
	 * applying a filesystem lock via ufs_fiolfs() will block in the
	 * implicit call to ufs_flush() until we unlock the pages after the
	 * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
	 * above 0 until they are done. We have to be careful not to increment
	 * ul_vnops_cnt here after forceful unmount hlocks the file system.
	 *
	 * If pp is NULL use ul_lock to make sure we don't increment
	 * ul_vnops_cnt after forceful unmount hlocks the file system.
	 */
	if (vmpss || pp == NULL) {
		ulp = &ufsvfsp->vfs_ulockfs;
		if (pp == NULL)
			mutex_enter(&ulp->ul_lock);
		if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
			if (pp == NULL) {
				mutex_exit(&ulp->ul_lock);
			}
			return (vmpss ? EIO : EINVAL);
		}
		atomic_inc_ulong(&ulp->ul_vnops_cnt);
		if (pp == NULL)
			mutex_exit(&ulp->ul_lock);
		if (ufs_quiesce_pend) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (vmpss ? EIO : EINVAL);
		}
	}

	if (dolock) {
		/*
		 * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
		 * handle a fault against a segment that maps vnode pages with
		 * large mappings.  Segvn creates pages and holds them locked
		 * SE_EXCL during VOP_PAGEIO() call. In this case we have to
		 * use rw_tryenter() to avoid a potential deadlock since in
		 * lock order i_contents needs to be taken first.
		 * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
		 */
		if (!vmpss) {
			rw_enter(&ip->i_contents, RW_READER);
		} else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (EDEADLK);
		}
	}

	/*
	 * Return an error to segvn because the pagefault request is beyond
	 * PAGESIZE rounded EOF.
	 */
	if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (EFAULT);
	}

	if (pp == NULL) {
		if (bmap_has_holes(ip)) {
			err = ENOSYS;
		}
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (err);
	}

	/*
	 * Break the io request into chunks, one for each contiguous
	 * stretch of disk blocks in the target file.
	 */
	while (done_len < io_len) {
		ASSERT(cpp != NULL);
		contig = 0;
		if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
		    &bn, &contig))
			break;

		if (bn == UFS_HOLE) {	/* No holey swapfiles */
			err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
			break;
		}

		cur_len = MIN(io_len - done_len, contig);
		/*
		 * Zero out a page beyond EOF, when the last block of
		 * a file is a UFS fragment so that ufs_pageio() can be used
		 * instead of ufs_getpage() to handle faults against
		 * segvn segments that use large pages.
		 */
		page_list_break(&cpp, &npp, btopr(cur_len));
		if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
			size_t xlen = cur_len & PAGEOFFSET;
			pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
		}

		bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);

		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_un.b_addr = (caddr_t)0;
		bp->b_file = ip->i_vnode;

		ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
		ub.ub_pageios.value.ul++;
		if (ufsvfsp->vfs_snapshot)
			fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
		else
			(void) bdev_strategy(bp);

		if (flags & B_READ)
			ufs_pageio_reads++;
		else
			ufs_pageio_writes++;
		if (flags & B_READ)
			lwp_stat_update(LWP_STAT_INBLK, 1);
		else
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		/*
		 * If the request is not B_ASYNC, wait for i/o to complete
		 * and re-assemble the page list to return to the caller.
		 * If it is B_ASYNC we leave the page list in pieces and
		 * cleanup() will dispose of them.
		 */
		if ((flags & B_ASYNC) == 0) {
			err = biowait(bp);
			pageio_done(bp);
			if (err)
				break;
			page_list_concat(&opp, &cpp);
			cpp = npp;
			npp = NULL;
		}
		cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
		done_len += cur_len;
	}

	ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
	if (err) {
		if (flags & B_ASYNC) {
			/* Cleanup unprocessed parts of list */
			page_list_concat(&cpp, &npp);
			if (flags & B_READ)
				pvn_read_done(cpp, B_ERROR);
			else
				pvn_write_done(cpp, B_ERROR);
		} else {
			/* Re-assemble list and let caller clean up */
			page_list_concat(&opp, &cpp);
			page_list_concat(&opp, &npp);
		}
	}

	if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
	    ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IACC;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	if (dolock)
		rw_exit(&ip->i_contents);
	if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
		cv_broadcast(&ulp->ul_cv);
	return (err);
}
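/*
 * Each pass of the loop above covers one bmap_read() extent:
 * page_list_break() splits off btopr(cur_len) pages for that transfer, and
 * for synchronous requests the completed chunk is concatenated back onto
 * opp so the caller gets its pages back as a single list.
 */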
/*
 * Called when the kernel is in a frozen state to dump data
 * directly to the device. It uses a private dump data structure,
 * set up by dump_ctl, to locate the correct disk block to which to dump.
 */
static int
ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
	caller_context_t *ct)
{
	u_offset_t	file_size;
	struct inode	*ip = VTOI(vp);
	struct fs	*fs = ip->i_fs;
	daddr_t		dbn, lfsbn;
	int		disk_blks = fs->fs_bsize >> DEV_BSHIFT;
	int		error = 0;
	int		ndbs, nfsbs;

	/*
	 * forced unmount case
	 */
	if (ip->i_ufsvfs == NULL)
		return (EIO);

	/*
	 * Validate the inode that it has not been modified since
	 * the dump structure is allocated.
	 */
	mutex_enter(&ip->i_tlock);
	if ((dump_info == NULL) ||
	    (dump_info->ip != ip) ||
	    (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
	    (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
		mutex_exit(&ip->i_tlock);
		return (-1);
	}
	mutex_exit(&ip->i_tlock);

	/*
	 * See that the file has room for this write
	 */
	UFS_GET_ISIZE(&file_size, ip);

	if (ldbtob(ldbn + dblks) > file_size)
		return (ENOSPC);

	/*
	 * Find the physical disk block numbers from the dump
	 * private data structure directly and write out the data
	 * in contiguous block lumps
	 */
	while (dblks > 0 && !error) {
		lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
		dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
		nfsbs = 1;
		ndbs = disk_blks - ldbn % disk_blks;
		while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
		    nfsbs]) == dbn + ndbs) {
			nfsbs++;
			ndbs += disk_blks;
		}
		if (ndbs > dblks)
			ndbs = dblks;
		error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
		addr += ldbtob((offset_t)ndbs);
		dblks -= ndbs;
		ldbn += ndbs;
	}
	return (error);
}
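/*
 * For example, with an 8K fs_bsize disk_blks is 16 DEV_BSIZE sectors per
 * file system block, so a request starting at DEV_BSIZE lbn 19 maps to file
 * block 1 (lblkno) at sector offset 19 % 16 = 3 within that block; dbn is
 * the corresponding device sector, and ndbs grows for as long as the
 * following file blocks are physically contiguous on the device.
 */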
/*
 * Prepare the file system before and after the dump operation.
 *
 * action = DUMP_ALLOC:
 * Preparation before dump, allocate dump private data structure
 * to hold all the direct and indirect block info for dump.
 *
 * action = DUMP_FREE:
 * Clean up after dump, deallocate the dump private data structure.
 *
 * action = DUMP_SCAN:
 * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
 * if found, the starting file-relative DEV_BSIZE lbn is written
 * to *bklp; that lbn is intended for use with VOP_DUMP()
 */
static int
ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	ufsvfs_t	*ufsvfsp = ip->i_ufsvfs;
	struct fs	*fs;
	daddr32_t	*dblk, *storeblk;
	daddr32_t	*nextblk, *endblk;
	struct buf	*bp;
	int		i, entry, entries;
	int		n, ncontig;

	/*
	 * check for forced unmount
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	if (action == DUMP_ALLOC) {
		/*
		 * alloc and record dump_info
		 */
		if (dump_info != NULL)
			return (EINVAL);

		ASSERT(vp->v_type == VREG);
		fs = ufsvfsp->vfs_fs;

		rw_enter(&ip->i_contents, RW_READER);

		if (bmap_has_holes(ip)) {
			rw_exit(&ip->i_contents);
			return (EFAULT);
		}

		/*
		 * calculate and allocate space needed according to i_size
		 */
		entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
		dump_info = kmem_alloc(sizeof (struct dump) +
		    (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
		if (dump_info == NULL) {
			rw_exit(&ip->i_contents);
			return (ENOMEM);
		}

		/* Start saving the info */
		dump_info->fsbs = entries;
		dump_info->ip = ip;
		storeblk = &dump_info->dblk[0];

		/* Direct Blocks */
		for (entry = 0; entry < NDADDR && entry < entries; entry++)
			*storeblk++ = ip->i_db[entry];

		/* Indirect Blocks */
		for (i = 0; i < NIADDR; i++) {
			int error = 0;

			bp = UFS_BREAD(ufsvfsp,
			    ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
			if (bp->b_flags & B_ERROR)
				error = EIO;
			else {
				dblk = bp->b_un.b_daddr;
				if ((storeblk = save_dblks(ip, ufsvfsp,
				    storeblk, dblk, i, entries)) == NULL)
					error = EIO;
			}

			brelse(bp);

			if (error != 0) {
				kmem_free(dump_info, sizeof (struct dump) +
				    (entries - 1) * sizeof (daddr32_t));
				rw_exit(&ip->i_contents);
				dump_info = NULL;
				return (error);
			}
		}
		/* and time stamp the information */
		mutex_enter(&ip->i_tlock);
		dump_info->time = ip->i_mtime;
		mutex_exit(&ip->i_tlock);

		rw_exit(&ip->i_contents);
	} else if (action == DUMP_FREE) {
		/*
		 * free dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);
		entries = dump_info->fsbs - 1;
		kmem_free(dump_info, sizeof (struct dump) +
		    entries * sizeof (daddr32_t));
		dump_info = NULL;
	} else if (action == DUMP_SCAN) {
		/*
		 * scan dump_info
		 */
		if (dump_info == NULL)
			return (EINVAL);

		dblk = dump_info->dblk;
		nextblk = dblk + 1;
		endblk = dblk + dump_info->fsbs - 1;
		fs = ufsvfsp->vfs_fs;
		ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);

		/*
		 * scan dblk[] entries; contig fs space is found when:
		 * ((current blkno + frags per block) == next blkno)
		 */
		n = 0;
		while (n < ncontig && dblk < endblk) {
			if ((*dblk + fs->fs_frag) == *nextblk)
				n++;
			else
				n = 0;
			dblk++;
			nextblk++;
		}

		/*
		 * index is where size bytes of contig space begins;
		 * conversion from index to the file's DEV_BSIZE lbn
		 * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
		 */
		if (n == ncontig) {
			i = (dblk - dump_info->dblk) - ncontig;
			*blkp = i << (fs->fs_bshift - DEV_BSHIFT);
		} else
			return (EFAULT);
	}
	return (0);
}
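/*
 * DUMP_SCAN example: with an 8K fs_bsize, *blkp expressed in DEV_BSIZE
 * sectors is shifted right by (fs_bshift - DEV_BSHIFT) = 4, i.e. divided by
 * 16, to get the number of whole file system blocks that must be physically
 * adjacent; the matching start index in dblk[] is converted back to a
 * DEV_BSIZE lbn by the inverse shift before being returned through *blkp.
 */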
/*
 * Recursive helper function for ufs_dumpctl().  It follows the indirect file
 * system blocks until it reaches the disk block addresses, which are
 * then stored into the given buffer, storeblk.
 */
static daddr32_t *
save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp, daddr32_t *storeblk,
	daddr32_t *dblk, int level, int entries)
{
	struct fs	*fs = ufsvfsp->vfs_fs;
	struct buf	*bp;
	int		i;

	if (level == 0) {
		for (i = 0; i < NINDIR(fs); i++) {
			if (storeblk - dump_info->dblk >= entries)
				break;
			*storeblk++ = dblk[i];
		}
		return (storeblk);
	}
	for (i = 0; i < NINDIR(fs); i++) {
		if (storeblk - dump_info->dblk >= entries)
			break;
		bp = UFS_BREAD(ufsvfsp,
		    ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (NULL);
		}
		storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
		    level - 1, entries);
		brelse(bp);

		if (storeblk == NULL)
			return (NULL);
	}
	return (storeblk);
}
static int
ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
	struct cred *cr, caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	struct ulockfs	*ulp;
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	ulong_t		vsa_mask = vsap->vsa_mask;
	int		err = EINVAL;

	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);

	/*
	 * Only grab locks if needed - they're not needed to check vsa_mask
	 * or if the mask contains no acl flags.
	 */
	if (vsa_mask != 0) {
		if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
		    ULOCKFS_GETATTR_MASK))
			return (err);

		rw_enter(&ip->i_contents, RW_READER);
		err = ufs_acl_get(ip, vsap, flag, cr);
		rw_exit(&ip->i_contents);

		if (ulp)
			ufs_lockfs_end(ulp);
	}
	return (err);
}
static int
ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
	caller_context_t *ct)
{
	struct inode	*ip = VTOI(vp);
	struct ulockfs	*ulp = NULL;
	struct ufsvfs	*ufsvfsp = VTOI(vp)->i_ufsvfs;
	ulong_t		vsa_mask = vsap->vsa_mask;
	int		err;
	int		haverwlock = 1;
	int		trans_size;
	int		donetrans = 0;
	int		retry = 1;

	ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

	/* Abort now if the request is either empty or invalid. */
	vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
	if ((vsa_mask == 0) ||
	    ((vsap->vsa_aclentp == NULL) &&
	    (vsap->vsa_dfaclentp == NULL))) {
		err = EINVAL;
		goto out;
	}

	/*
	 * Following convention, if this is a directory then we acquire the
	 * inode's i_rwlock after starting a UFS logging transaction;
	 * otherwise, we acquire it beforehand. Since we were called (and
	 * must therefore return) with the lock held, we will have to drop it,
	 * and later reacquire it, if operating on a directory.
	 */
	if (vp->v_type == VDIR) {
		rw_exit(&ip->i_rwlock);
		haverwlock = 0;
	} else {
		/* Upgrade the lock if required. */
		if (!rw_write_held(&ip->i_rwlock)) {
			rw_exit(&ip->i_rwlock);
			rw_enter(&ip->i_rwlock, RW_WRITER);
		}
	}

again:
	ASSERT(!(vp->v_type == VDIR && haverwlock));
	if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
		ulp = NULL;
		retry = 0;
		goto out;
	}

	/*
	 * Check that the file system supports this operation. Note that
	 * ufs_lockfs_begin() will have checked that the file system had
	 * not been forcibly unmounted.
	 */
	if (ufsvfsp->vfs_fs->fs_ronly) {
		err = EROFS;
		goto out;
	}
	if (ufsvfsp->vfs_nosetsec) {
		err = ENOSYS;
		goto out;
	}

	if (ulp) {
		TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
		    trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
		donetrans = 1;
	}

	if (vp->v_type == VDIR) {
		rw_enter(&ip->i_rwlock, RW_WRITER);
		haverwlock = 1;
	}

	ASSERT(haverwlock);

	/* Do the actual work. */
	rw_enter(&ip->i_contents, RW_WRITER);
	/*
	 * Suppress out of inodes messages if we will retry.
	 */
	if (retry)
		ip->i_flag |= IQUIET;
	err = ufs_acl_set(ip, vsap, flag, cr);
	ip->i_flag &= ~IQUIET;
	rw_exit(&ip->i_contents);

out:
	if (ulp) {
		if (donetrans) {
			/*
			 * top_end_async() can eventually call
			 * top_end_sync(), which can block. We must
			 * therefore observe the lock-ordering protocol
			 * here as well.
			 */
			if (vp->v_type == VDIR) {
				rw_exit(&ip->i_rwlock);
				haverwlock = 0;
			}
			TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
		}
		ufs_lockfs_end(ulp);
	}
	/*
	 * If no inodes available, try scaring a logically-
	 * free one out of the delete queue to someplace
	 * that we can find it.
	 */
	if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
		ufs_delete_drain_wait(ufsvfsp, 1);
		retry = 0;
		if (vp->v_type == VDIR && haverwlock) {
			rw_exit(&ip->i_rwlock);
			haverwlock = 0;
		}
		goto again;
	}
	/*
	 * If we need to reacquire the lock then it is safe to do so
	 * as a reader. This is because ufs_rwunlock(), which will be
	 * called by our caller after we return, does not differentiate
	 * between shared and exclusive locks.
	 */
	if (!haverwlock) {
		ASSERT(vp->v_type == VDIR);
		rw_enter(&ip->i_rwlock, RW_READER);
	}

	return (err);
}
/*
 * Locate the vnode to be used for an event notification. As this will
 * be called prior to the name space change perform basic verification
 * that the change will be allowed.
 */
static int
ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
	struct vnode **vpp)
{
	int		namlen;
	int		error;
	struct vnode	*vp;
	struct inode	*ip;
	struct inode	*xip;
	struct ufsvfs	*ufsvfsp;
	struct ulockfs	*ulp;

	ip = VTOI(dvp);
	*vpp = NULL;

	if ((namlen = strlen(nm)) == 0)
		return (EINVAL);

	if (nm[0] == '.') {
		if (namlen == 1)
			return (EINVAL);
		else if ((namlen == 2) && nm[1] == '.') {
			return (EEXIST);
		}
	}

	/*
	 * Check accessibility and write access of parent directory as we
	 * only want to post the event if we're able to make a change.
	 */
	if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
		return (error);

	if (vp = dnlc_lookup(dvp, nm)) {
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			return (ENOENT);
		}

		*vpp = vp;
		return (0);
	}

	/*
	 * Keep the idle queue from getting too long by idling two
	 * inodes before attempting to allocate another.
	 * This operation must be performed before entering lockfs
	 * or a transaction.
	 */
	if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
		if ((curthread->t_flag & T_DONTBLOCK) == 0) {
			ins.in_lidles.value.ul += ufs_lookup_idle_count;
			ufs_idle_some(ufs_lookup_idle_count);
		}

	ufsvfsp = ip->i_ufsvfs;

	if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
		return (error);

	if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
		vp = ITOV(xip);
		*vpp = vp;
	}

	if (ulp)
		ufs_lockfs_end(ulp);

	if (error == EAGAIN)