/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

/*
 *	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/vnode.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/atomic.h>
#include <rpc/types.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs_acl.h>
#include <nfs/rnode4.h>
#include <nfs/nfs4_clnt.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
/*
 * Arguments to page-flush thread.
 */

int nfs4_client_lease_debug;
int nfs4_sharedfh_debug;

/* temporary: panic if v_type is inconsistent with r_attr va_type */

static time_t	nfs4_client_resumed = 0;
static callb_id_t cid = 0;

static int	nfs4renew(nfs4_server_t *);
static void	nfs4_attrcache_va(vnode_t *, nfs4_ga_res_t *, int);
static void	nfs4_pgflush_thread(pgflush_t *);

static boolean_t nfs4_client_cpr_callb(void *, int);

kmutex_t mig_lock;		/* lock protecting mig_list */
list_t mig_list;		/* list of NFS v4 mounts in zone */
boolean_t mig_destructor_called;

static zone_key_t mi4_list_key;
/*
 * Attributes caching:
 *
 * Attributes are cached in the rnode in struct vattr form.
 * There is a time associated with the cached attributes (r_time_attr_inval)
 * which tells whether the attributes are valid.  The time is initialized
 * to the difference between current time and the modify time of the vnode
 * when new attributes are cached.  This allows the attributes for
 * files that have changed recently to be timed out sooner than for files
 * that have not changed for a long time.  There are minimum and maximum
 * timeout values that can be set per mount point.
 */

/*
 * If a cache purge is in progress, wait for it to finish.
 *
 * The current thread must not be in the middle of an
 * nfs4_start_op/nfs4_end_op region.  Otherwise, there could be a deadlock
 * between this thread, a recovery thread, and the page flush thread.
 */
nfs4_waitfor_purge_complete(vnode_t *vp)

        if ((rp->r_serial != NULL && rp->r_serial != curthread) ||
            ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread)) {
                mutex_enter(&rp->r_statelock);
                sigintr(&smask, VTOMI4(vp)->mi_flags & MI4_INT);
                while ((rp->r_serial != NULL && rp->r_serial != curthread) ||
                    ((rp->r_flags & R4PGFLUSH) &&
                    rp->r_pgflush != curthread)) {
                        if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                                mutex_exit(&rp->r_statelock);
                mutex_exit(&rp->r_statelock);
/*
 * Validate caches by checking cached attributes.  If they have timed out,
 * then get new attributes from the server.  As a side effect, cache
 * invalidation is done if the attributes have changed.
 *
 * If the attributes have not timed out and if there is a cache
 * invalidation being done by some other thread, then wait until that
 * thread has completed the cache invalidation.
 */
nfs4_validate_caches(vnode_t *vp, cred_t *cr)

        if (ATTRCACHE4_VALID(vp)) {
                error = nfs4_waitfor_purge_complete(vp);

        return (nfs4_getattr_otw(vp, &gar, cr, 0));
/*
 * Fill in attribute from the cache.
 * If valid, then return 0 to indicate that no error occurred,
 * otherwise return 1 to indicate that an error occurred.
 */
nfs4_getattr_cache(vnode_t *vp, struct vattr *vap)

        mutex_enter(&rp->r_statelock);
        mutex_enter(&rp->r_statev4_lock);
        if (ATTRCACHE4_VALID(vp)) {
                mutex_exit(&rp->r_statev4_lock);
                /*
                 * Cached attributes are valid
                 */
                mutex_exit(&rp->r_statelock);
        mutex_exit(&rp->r_statev4_lock);
        mutex_exit(&rp->r_statelock);
/*
 * If the returned error is ESTALE, flush all caches.  The nfs4_purge_caches()
 * call is synchronous because all the pages were invalidated by the
 * nfs4_invalidate_pages() call.
 */
nfs4_purge_stale_fh(int errno, vnode_t *vp, cred_t *cr)

        struct rnode4 *rp = VTOR4(vp);

        /* Ensure that the ..._end_op() call has been done */
        ASSERT(tsd_get(nfs4_tsd_key) == NULL);

        mutex_enter(&rp->r_statelock);
        rp->r_flags |= R4STALE;
        mutex_exit(&rp->r_statelock);
        if (nfs4_has_pages(vp))
                nfs4_invalidate_pages(vp, 0, cr);
        nfs4_purge_caches(vp, NFS4_PURGE_DNLC, cr, FALSE);
/*
 * Purge all of the various NFS `data' caches.  If "asyncpg" is TRUE, the
 * page purge is done asynchronously.
 */
nfs4_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr, int asyncpg)

        int pgflush;			/* are we the page flush thread? */

        /*
         * Purge the DNLC for any entries which refer to this file.
         */
        if (vp->v_count > 1 &&
            (vp->v_type == VDIR || purge_dnlc == NFS4_PURGE_DNLC))

        /*
         * Clear any readdir state bits and purge the readlink response cache.
         */
        mutex_enter(&rp->r_statelock);
        rp->r_flags &= ~R4LOOKUP;
        contents = rp->r_symlink.contents;
        size = rp->r_symlink.size;
        rp->r_symlink.contents = NULL;

        xattr = rp->r_xattr_dir;
        rp->r_xattr_dir = NULL;

        /*
         * Purge pathconf cache too.
         */
        rp->r_pathconf.pc4_xattr_valid = 0;
        rp->r_pathconf.pc4_cache_valid = 0;

        pgflush = (curthread == rp->r_pgflush);
        mutex_exit(&rp->r_statelock);

        if (contents != NULL) {
                kmem_free((void *)contents, size);

        /*
         * Flush the page cache.  If the current thread is the page flush
         * thread, don't initiate a new page flush.  There's no need for
         * it, and doing it correctly is hard.
         */
        if (nfs4_has_pages(vp) && !pgflush) {
                (void) nfs4_waitfor_purge_complete(vp);
                nfs4_flush_pages(vp, cr);
                        /*
                         * We don't hold r_statelock while creating the
                         * thread, in case the call blocks.  So we use a
                         * flag to indicate that a page flush thread is
                         * active.
                         */
                        mutex_enter(&rp->r_statelock);
                        if (rp->r_flags & R4PGFLUSH) {
                                mutex_exit(&rp->r_statelock);
                                rp->r_flags |= R4PGFLUSH;
                                mutex_exit(&rp->r_statelock);

                                args = kmem_alloc(sizeof (pgflush_t),
                                (void) zthread_create(NULL, 0,
                                    nfs4_pgflush_thread, args, 0,

        /*
         * Flush the readdir response cache.
         */
        nfs4_purge_rddir_cache(vp);
/*
 * Invalidate all pages for the given file, after writing back the dirty
 * pages.
 */
nfs4_flush_pages(vnode_t *vp, cred_t *cr)

        rnode4_t *rp = VTOR4(vp);

        error = fop_putpage(vp, 0, 0, B_INVAL, cr, NULL);
        if (error == ENOSPC || error == EDQUOT) {
                mutex_enter(&rp->r_statelock);
                mutex_exit(&rp->r_statelock);
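/*
 * Body of the page-flush worker created by nfs4_purge_caches(): note the
 * current thread in r_pgflush so the flush can't deadlock against itself,
 * write back and invalidate the pages, then clear R4PGFLUSH, wake any
 * waiters, and free the argument structure.
 */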
nfs4_pgflush_thread(pgflush_t *args)

        rnode4_t *rp = VTOR4(args->vp);

        /* remember which thread we are, so we don't deadlock ourselves */
        mutex_enter(&rp->r_statelock);
        ASSERT(rp->r_pgflush == NULL);
        rp->r_pgflush = curthread;
        mutex_exit(&rp->r_statelock);

        nfs4_flush_pages(args->vp, args->cr);

        mutex_enter(&rp->r_statelock);
        rp->r_pgflush = NULL;
        rp->r_flags &= ~R4PGFLUSH;
        cv_broadcast(&rp->r_cv);
        mutex_exit(&rp->r_statelock);

        kmem_free(args, sizeof (pgflush_t));
/*
 * Purge the readdir cache of all entries which are not currently
 * being filled.
 */
nfs4_purge_rddir_cache(vnode_t *vp)

        mutex_enter(&rp->r_statelock);
        rp->r_flags &= ~R4LOOKUP;
        rp->r_flags |= R4READDIRWATTR;
        rddir4_cache_purge(rp);
        mutex_exit(&rp->r_statelock);
/*
 * Set attributes cache for given vnode using virtual attributes.  There is
 * no cache validation, but if the attributes are deemed to be stale, they
 * are ignored.  This corresponds to nfs3_attrcache().
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 */
nfs4_attrcache_noinval(vnode_t *vp, nfs4_ga_res_t *garp, hrtime_t t)

        rnode4_t *rp = VTOR4(vp);

        mutex_enter(&rp->r_statelock);
        if (rp->r_time_attr_saved <= t)
                nfs4_attrcache_va(vp, garp, FALSE);
        mutex_exit(&rp->r_statelock);
/*
 * Use the passed in virtual attributes to check to see whether the
 * data and metadata caches are valid, cache the new attributes, and
 * then do the cache invalidation if required.
 *
 * The cache validation and caching of the new attributes is done
 * atomically via the use of the mutex, r_statelock.  If required,
 * the cache invalidation is done atomically w.r.t. the cache
 * validation and caching of the attributes via the pseudo lock,
 *
 * This routine is used to do cache validation and attributes caching
 * for operations with a single set of post operation attributes.
 */
nfs4_attr_cache(vnode_t *vp, nfs4_ga_res_t *garp,
    hrtime_t t, cred_t *cr, int async,

        int mtime_changed = 0;
        int ctime_changed = 0;
        int was_serial, set_time_cache_inval, recov;
        vattr_t *vap = &garp->n4g_va;
        mntinfo4_t *mi = VTOMI4(vp);
        boolean_t writemodify_set = B_FALSE;
        boolean_t cachepurge_set = B_FALSE;

        ASSERT(mi->mi_vfsp->vfs_dev == garp->n4g_va.va_fsid);
        /* Is curthread the recovery thread? */
        mutex_enter(&mi->mi_lock);
        recov = (VTOMI4(vp)->mi_recovthread == curthread);
        mutex_exit(&mi->mi_lock);

        mutex_enter(&rp->r_statelock);
        was_serial = (rp->r_serial == curthread);
        if (rp->r_serial && !was_serial) {
                klwp_t *lwp = ttolwp(curthread);

                /*
                 * If we're the recovery thread, then purge current attrs
                 * and bail out to avoid potential deadlock between another
                 * thread caching attrs (r_serial thread), recov thread,
                 * and an async writer thread.
                 */
                        PURGE_ATTRCACHE4_LOCKED(rp);
                        mutex_exit(&rp->r_statelock);

                while (rp->r_serial != NULL) {
                        if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                                mutex_exit(&rp->r_statelock);

        /*
         * If there is a page flush thread, the current thread needs to
         * bail out, to prevent a possible deadlock between the current
         * thread (which might be in a start_op/end_op region), the
         * recovery thread, and the page flush thread.  Expire the
         * attribute cache, so that any attributes the current thread was
         * going to set are not lost.
         */
        if ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread) {
                PURGE_ATTRCACHE4_LOCKED(rp);
                mutex_exit(&rp->r_statelock);

        if (rp->r_time_attr_saved > t) {
                /*
                 * Attributes have been cached since these attributes were
                 * probably made.  If there is an inconsistency in what is
                 * cached, mark them invalid.  If not, don't act on them.
                 */
                if (!CACHE4_VALID(rp, vap->va_mtime, vap->va_size))
                        PURGE_ATTRCACHE4_LOCKED(rp);
                mutex_exit(&rp->r_statelock);
        set_time_cache_inval = 0;
                /*
                 * Only directory modifying callers pass non-NULL cinfo.
                 */
                ASSERT(vp->v_type == VDIR);
                /*
                 * If the cache timeout either doesn't exist or hasn't expired,
                 * and dir didn't change on server before dirmod op
                 * and dir didn't change after dirmod op but before getattr
                 * then there's a chance that the client's cached data for
                 * this object is current (not stale).  No immediate cache
                 */
                if ((! rp->r_time_cache_inval || t < rp->r_time_cache_inval) &&
                    cinfo->before == rp->r_change &&
                    (garp->n4g_change_valid &&
                    cinfo->after == garp->n4g_change)) {
                        /*
                         * If atomic isn't set, then the before/after info
                         * cannot be blindly trusted.  For this case, we tell
                         * nfs4_attrcache_va to cache the attrs but also
                         * establish an absolute maximum cache timeout.  When
                         * the timeout is reached, caches will be flushed.
                         */
                                set_time_cache_inval = 1;
                        /*
                         * We're not sure exactly what changed, but we know
                         * what to do.  flush all caches for dir.  remove the
                         *
                         * a) timeout expired.  flush all caches.
                         * b) r_change != cinfo.before.  flush all caches.
                         * c) r_change == cinfo.before, but cinfo.after !=
                         *    post-op getattr(change).  flush all caches.
                         * d) post-op getattr(change) not provided by server.
                         */
                        rp->r_time_cache_inval = 0;
        /*
         * Write thread after writing data to file on remote server,
         * will always set R4WRITEMODIFIED to indicate that file on
         * remote server was modified with a WRITE operation and would
         * have marked attribute cache as timed out.  If R4WRITEMODIFIED
         * is set, then do not check for mtime and ctime change.
         */
        if (!(rp->r_flags & R4WRITEMODIFIED)) {
                if (!CACHE4_VALID(rp, vap->va_mtime, vap->va_size))

                if (rp->r_attr.va_ctime.tv_sec !=
                    vap->va_ctime.tv_sec ||
                    rp->r_attr.va_ctime.tv_nsec !=
                    vap->va_ctime.tv_nsec)

                /*
                 * If the change attribute was not provided by server
                 * or it differs, then flush all caches.
                 */
                if (!garp->n4g_change_valid ||
                    rp->r_change != garp->n4g_change) {

                writemodify_set = B_TRUE;

        preattr_rsize = rp->r_size;

        nfs4_attrcache_va(vp, garp, set_time_cache_inval);
        /*
         * If we have updated filesize in nfs4_attrcache_va, as soon as we
         * drop statelock we will be in transition of purging all
         * our caches and updating them.  It is possible for another
         * thread to pick this new file size and read in zeroed data.
         * Stall other threads till cache purge is complete.
         */
        if ((!cinfo) && (rp->r_size != preattr_rsize)) {
                /*
                 * If R4WRITEMODIFIED was set and we have updated the file
                 * size, Server's returned file size need not necessarily
                 * be because of this Client's WRITE.  We need to purge
                 */
                if (mtime_changed && !(rp->r_flags & R4INCACHEPURGE)) {
                        rp->r_flags |= R4INCACHEPURGE;
                        cachepurge_set = B_TRUE;

        if (!mtime_changed && !ctime_changed) {
                mutex_exit(&rp->r_statelock);

        rp->r_serial = curthread;

        mutex_exit(&rp->r_statelock);

        /*
         * If we're the recov thread, then force async nfs4_purge_caches
         * to avoid potential deadlock.
         */
        nfs4_purge_caches(vp, NFS4_NOPURGE_DNLC, cr, recov ? 1 : async);

        if ((rp->r_flags & R4INCACHEPURGE) && cachepurge_set) {
                mutex_enter(&rp->r_statelock);
                rp->r_flags &= ~R4INCACHEPURGE;
                cv_broadcast(&rp->r_cv);
                mutex_exit(&rp->r_statelock);
                cachepurge_set = B_FALSE;

        (void) nfs4_access_purge_rp(rp);
        if (rp->r_secattr != NULL) {
                mutex_enter(&rp->r_statelock);
                rp->r_secattr = NULL;
                mutex_exit(&rp->r_statelock);
                nfs4_acl_free_cache(vsp);

        mutex_enter(&rp->r_statelock);
        cv_broadcast(&rp->r_cv);
        mutex_exit(&rp->r_statelock);
/*
 * Set attributes cache for given vnode using virtual attributes.
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 *
 * The caller must be holding r_statelock.
 */
nfs4_attrcache_va(vnode_t *vp, nfs4_ga_res_t *garp, int set_cache_timeout)

        vattr_t *vap = &garp->n4g_va;

        ASSERT(MUTEX_HELD(&rp->r_statelock));
        ASSERT(vap->va_mask == AT_ALL);

        /* Switch to master before checking v_flag */
        if (IS_SHADOW(vp, rp))

        /*
         * Only establish a new cache timeout (if requested).  Never
         * extend a timeout.  Never clear a timeout.  Clearing a timeout
         * is done by nfs4_update_dircaches (ancestor in our call chain)
         */
        if (set_cache_timeout && ! rp->r_time_cache_inval)
                rp->r_time_cache_inval = now + mi->mi_acdirmax;

        /*
         * Delta is the number of nanoseconds that we will
         * cache the attributes of the file.  It is based on
         * the number of nanoseconds since the last time that
         * we detected a change.  The assumption is that files
         * that changed recently are likely to change again.
         * There is a minimum and a maximum for regular files
         * and for directories which is enforced though.
         *
         * Using the time since last change was detected
         * eliminates direct comparison or calculation
         * using mixed client and server times.  NFS does
         * not make any assumptions regarding the client
         * and server clocks being synchronized.
         */
        if (vap->va_mtime.tv_sec != rp->r_attr.va_mtime.tv_sec ||
            vap->va_mtime.tv_nsec != rp->r_attr.va_mtime.tv_nsec ||
            vap->va_size != rp->r_attr.va_size) {
                rp->r_time_attr_saved = now;

        if ((mi->mi_flags & MI4_NOAC) || (vp->v_flag & VNOCACHE))

        delta = now - rp->r_time_attr_saved;
        if (vp->v_type == VDIR) {
                if (delta < mi->mi_acdirmin)
                        delta = mi->mi_acdirmin;
                else if (delta > mi->mi_acdirmax)
                        delta = mi->mi_acdirmax;
                if (delta < mi->mi_acregmin)
                        delta = mi->mi_acregmin;
                else if (delta > mi->mi_acregmax)
                        delta = mi->mi_acregmax;

        rp->r_time_attr_inval = now + delta;

        if (garp->n4g_change_valid)
                rp->r_change = garp->n4g_change;
        /*
         * The attributes that were returned may be valid and can
         * be used, but they may not be allowed to be cached.
         * Reset the timers to cause immediate invalidation and
         * clear r_change so no VERIFY operations will succeed
         */
        if (garp->n4g_attrwhy == NFS4_GETATTR_NOCACHE_OK) {
                rp->r_time_attr_inval = now;
                rp->r_time_attr_saved = now;

        /*
         * If mounted_on_fileid returned AND the object is a stub,
         * then set object's va_nodeid to the mounted over fid
         * returned by server.
         *
         * If mounted_on_fileid not provided/supported, then
         * just set it to 0 for now.  Eventually it would be
         * better to set it to a hashed version of FH.  This
         * would probably be good enough to provide a unique
         * fid/d_ino within a dir.
         *
         * We don't need to carry mounted_on_fileid in the
         * rnode as long as the client never requests fileid
         * without also requesting mounted_on_fileid.  For
         */
        if (garp->n4g_mon_fid_valid) {
                rp->r_mntd_fid = garp->n4g_mon_fid;
                        rp->r_attr.va_nodeid = rp->r_mntd_fid;

        /*
         * Check to see if there are valid pathconf bits to
         * cache in the rnode.
         */
        if (garp->n4g_ext_res) {
                if (garp->n4g_ext_res->n4g_pc4.pc4_cache_valid) {
                        rp->r_pathconf = garp->n4g_ext_res->n4g_pc4;
                if (garp->n4g_ext_res->n4g_pc4.pc4_xattr_valid) {
                        rp->r_pathconf.pc4_xattr_valid = TRUE;
                        rp->r_pathconf.pc4_xattr_exists =
                            garp->n4g_ext_res->n4g_pc4.pc4_xattr_exists;
        /*
         * Update the size of the file if there is no cached data or if
         * the cached data is clean and there is no data being written
         */
        if (rp->r_size != vap->va_size &&
            (!vn_has_cached_data(vp) ||
            (!(rp->r_flags & R4DIRTY) && rp->r_count == 0))) {
                rp->r_size = vap->va_size;
        nfs_setswaplike(vp, vap);
        rp->r_flags &= ~R4WRITEMODIFIED;
/*
 * Get attributes over-the-wire and update attributes cache
 * if no error occurred in the over-the-wire operation.
 * Return 0 if successful, otherwise error.
 */
nfs4_getattr_otw(vnode_t *vp, nfs4_ga_res_t *garp, cred_t *cr, int get_acl)

        mntinfo4_t *mi = VTOMI4(vp);
        nfs4_recov_state_t recov_state;
        nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

        recov_state.rs_flags = 0;
        recov_state.rs_num_retry_despite_err = 0;

        /* Save the original mount point security flavor */
        (void) save_mnt_secinfo(mi->mi_curr_serv);

        if ((e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR,
            &recov_state, NULL))) {
                (void) check_mnt_secinfo(mi->mi_curr_serv, vp);

        nfs4_getattr_otw_norecovery(vp, garp, &e, cr, get_acl);

        if (nfs4_needs_recovery(&e, FALSE, vp->v_vfsp)) {
                if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
                    NULL, OP_GETATTR, NULL, NULL, NULL) == FALSE) {
                        nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR,

        nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state, 0);

        if (e.stat == NFS4_OK) {
                nfs4_attr_cache(vp, garp, t, cr, FALSE, NULL);
                e.error = geterrno4(e.stat);
                nfs4_purge_stale_fh(e.error, vp, cr);

        /*
         * If we did a getattr on a node that is a stub for a crossed
         * mount point, keep the original secinfo flavor for
         * the current file system, not the crossed one.
         */
        (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
/*
 * Generate a compound to get attributes over-the-wire.
 */
nfs4_getattr_otw_norecovery(vnode_t *vp, nfs4_ga_res_t *garp,
    nfs4_error_t *ep, cred_t *cr, int get_acl)

        COMPOUND4args_clnt args;
        COMPOUND4res_clnt res;
        rnode4_t *rp = VTOR4(vp);

        args.ctag = TAG_GETATTR;

        argop[0].argop = OP_CPUTFH;
        argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;

        /*
         * Unlike nfs version 2 and 3, where getattr returns all the
         * attributes, nfs version 4 returns only the ones explicitly
         * asked for.  This creates problems, as some system functions
         * (e.g. cache check) require certain attributes and if the
         * cached node lacks some attributes such as uid/gid, it can
         * affect system utilities (e.g. "ls") that rely on the information
         * to be there.  This can lead to anything from system crashes to
         * corrupted information processed by user apps.
         * So to ensure that all bases are covered, request at least
         * the AT_ALL attribute mask.
         */
        argop[1].argop = OP_GETATTR;
        argop[1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
                argop[1].nfs_argop4_u.opgetattr.attr_request |= FATTR4_ACL_MASK;
        argop[1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);

        rfs4call(VTOMI4(vp), &args, &res, cr, &doqueue, 0, ep);

        if (res.status != NFS4_OK) {
                xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);

        *garp = res.array[1].nfs_resop4_u.opgetattr.ga_res;

        xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
/*
 * Return either cached or remote attributes.  If we get remote attributes,
 * use them to check and invalidate caches, then cache the new attributes.
 */
nfs4getattr(vnode_t *vp, vattr_t *vap, cred_t *cr)

        ASSERT(nfs4_consistent_type(vp));

        /*
         * If we've got cached attributes, we're done, otherwise go
         * to the server to get attributes, which will update the cache
         * in the process.  Either way, use the cached attributes for
         * the caller's vattr_t.
         *
         * Note that we ignore the gar set by the OTW call: the attr caching
         * code may make adjustments when storing to the rnode, and we want
         * to see those changes here.
         */
        mutex_enter(&rp->r_statelock);
        if (!ATTRCACHE4_VALID(vp)) {
                mutex_exit(&rp->r_statelock);
                error = nfs4_getattr_otw(vp, &gar, cr, 0);
                mutex_enter(&rp->r_statelock);

        /* Return the client's view of file size */
        vap->va_size = rp->r_size;

        mutex_exit(&rp->r_statelock);

        ASSERT(nfs4_consistent_type(vp));
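/*
 * Fetch attributes over the wire using a caller-supplied attribute bitmap
 * (PUTFH + GETATTR), driving recovery as needed, and copy the GETATTR
 * results (including any extended results) into *garp.
 */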
nfs4_attr_otw(vnode_t *vp, nfs4_tag_type_t tag_type,
    nfs4_ga_res_t *garp, bitmap4 reqbitmap, cred_t *cr)

        COMPOUND4args_clnt args;
        COMPOUND4res_clnt res;
        mntinfo4_t *mi = VTOMI4(vp);
        bool_t needrecov = FALSE;
        nfs4_recov_state_t recov_state;
        nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
        nfs4_ga_ext_res_t *gerp;

        recov_state.rs_flags = 0;
        recov_state.rs_num_retry_despite_err = 0;

        args.ctag = tag_type;

        e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR, &recov_state, NULL);

        argop[0].argop = OP_CPUTFH;
        argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;

        argop[1].argop = OP_GETATTR;
        argop[1].nfs_argop4_u.opgetattr.attr_request = reqbitmap;
        argop[1].nfs_argop4_u.opgetattr.mi = mi;

        NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
            "nfs4_attr_otw: %s call, rp %s", needrecov ? "recov" : "first",
            rnode4info(VTOR4(vp))));

        rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

        needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
        if (!needrecov && e.error) {
                nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,

                NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
                    "nfs4_attr_otw: initiating recovery\n"));

                abort = nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
                    NULL, OP_GETATTR, NULL, NULL, NULL);
                nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
                        xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
                        e.error = geterrno4(res.status);

                e.error = geterrno4(res.status);

        gerp = garp->n4g_ext_res;
        bcopy(&res.array[1].nfs_resop4_u.opgetattr.ga_res,
            garp, sizeof (nfs4_ga_res_t));
        garp->n4g_ext_res = gerp;
        if (garp->n4g_ext_res &&
            res.array[1].nfs_resop4_u.opgetattr.ga_res.n4g_ext_res)
                bcopy(res.array[1].nfs_resop4_u.opgetattr.
                    garp->n4g_ext_res, sizeof (nfs4_ga_ext_res_t));

        xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
        nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
/*
 * Asynchronous I/O parameters.  nfs_async_threads is the high-water mark
 * for the demand-based allocation of async threads per-mount.  The
 * nfs_async_timeout is the amount of time a thread will live after it
 * becomes idle, unless new I/O requests are received before the thread
 * dies.  See nfs4_async_putpage and nfs4_async_start.
 */
static void	nfs4_async_start(struct vfs *);
static void	nfs4_async_pgops_start(struct vfs *);
static void	nfs4_async_common_start(struct vfs *, int);
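/*
 * Release the resources held by a queued async request: wake any waiters
 * on the rnode (for non-inactive requests), then drop the vnode and
 * credential references and free the request structure itself.
 */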
free_async_args4(struct nfs4_async_reqs *args)

        if (args->a_io != NFS4_INACTIVE) {
                rp = VTOR4(args->a_vp);
                mutex_enter(&rp->r_statelock);
                if (args->a_io == NFS4_PUTAPAGE ||
                    args->a_io == NFS4_PAGEIO)
                cv_broadcast(&rp->r_cv);
                mutex_exit(&rp->r_statelock);
                VN_RELE(args->a_vp);
        crfree(args->a_cred);
        kmem_free(args, sizeof (*args));
/*
 * Cross-zone thread creation and NFS access is disallowed, yet fsflush() and
 * pageout(), running in the global zone, have legitimate reasons to do
 * fop_putpage(B_ASYNC) on other zones' NFS mounts.  We avoid the problem by
 * use of a per-mount "asynchronous requests manager thread" which is
 * signaled by the various asynchronous work routines when there is
 * asynchronous work to be done.  It is responsible for creating new
 * worker threads if necessary, and notifying existing worker threads
 * that there is work to be done.
 *
 * In other words, it will "take the specifications from the customers and
 * give them to the engineers."
 *
 * Worker threads die off of their own accord if they are no longer
 *
 * This thread is killed when the zone is going away or the filesystem
 * is being unmounted.
 */
nfs4_async_manager(vfs_t *vfsp)

        callb_cpr_t cprinfo;

        CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
            "nfs4_async_manager");

        mutex_enter(&mi->mi_async_lock);
        /*
         * We want to stash the max number of threads that this mount was
         * allowed so we can use it later when the variable is set to zero as
         * part of the zone/mount going away.
         *
         * We want to be able to create at least one thread to handle
         * asynchronous inactive calls.
         */
        max_threads = MAX(mi->mi_max_threads, 1);
        /*
         * We don't want to wait for mi_max_threads to go to zero, since that
         * happens as part of a failed unmount, but this thread should only
         * exit when the mount is really going away.
         *
         * Once MI4_ASYNC_MGR_STOP is set, no more async operations will be
         * attempted: the various _async_*() functions know to do things
         * inline if mi_max_threads == 0.  Henceforth we just drain out the
         * outstanding requests.
         *
         * Note that we still create zthreads even if we notice the zone is
         * shutting down (MI4_ASYNC_MGR_STOP is set); this may cause the zone
         * shutdown sequence to take slightly longer in some cases, but
         * doesn't violate the protocol, as all threads will exit as soon as
         * they're done processing the remaining requests.
         */
        while (mi->mi_async_req_count > 0) {
                /*
                 * Paranoia: If the mount started out having
                 * (mi->mi_max_threads == 0), and the value was
                 * later changed (via a debugger or somesuch),
                 * we could be confused since we will think we
                 * can't create any threads, and the calling
                 * code (which looks at the current value of
                 * mi->mi_max_threads, now non-zero) thinks we
                 *
                 * So, because we're paranoid, we create threads
                 * up to the maximum of the original and the
                 * current value.  This means that future
                 * (debugger-induced) alterations of
                 * mi->mi_max_threads are ignored for our
                 * purposes, but who told them they could change
                 * random values on a live kernel anyhow?
                 */
                if (mi->mi_threads[NFS4_ASYNC_QUEUE] <
                    MAX(mi->mi_max_threads, max_threads)) {
                        mi->mi_threads[NFS4_ASYNC_QUEUE]++;
                        mutex_exit(&mi->mi_async_lock);
                        VFS_HOLD(vfsp);		/* hold for new thread */
                        (void) zthread_create(NULL, 0, nfs4_async_start,
                            vfsp, 0, minclsyspri);
                        mutex_enter(&mi->mi_async_lock);
                } else if (mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] <
                    NUM_ASYNC_PGOPS_THREADS) {
                        mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE]++;
                        mutex_exit(&mi->mi_async_lock);
                        VFS_HOLD(vfsp);		/* hold for new thread */
                        (void) zthread_create(NULL, 0,
                            nfs4_async_pgops_start, vfsp, 0,
                        mutex_enter(&mi->mi_async_lock);
                NFS4_WAKE_ASYNC_WORKER(mi->mi_async_work_cv);
                ASSERT(mi->mi_async_req_count != 0);
                mi->mi_async_req_count--;

        mutex_enter(&mi->mi_lock);
        if (mi->mi_flags & MI4_ASYNC_MGR_STOP) {
                mutex_exit(&mi->mi_lock);
        mutex_exit(&mi->mi_lock);

        CALLB_CPR_SAFE_BEGIN(&cprinfo);
        cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
        CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

        NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
            "nfs4_async_manager exiting for vfs %p\n", (void *)mi->mi_vfsp));
        /*
         * Let everyone know we're done.
         */
        mi->mi_manager_thread = NULL;
        /*
         * Wake up the inactive thread.
         */
        cv_broadcast(&mi->mi_inact_req_cv);
        /*
         * Wake up anyone sitting in nfs4_async_manager_stop()
         */
        cv_broadcast(&mi->mi_async_cv);
        /*
         * There is no explicit call to mutex_exit(&mi->mi_async_lock)
         * since CALLB_CPR_EXIT is actually responsible for releasing
         * 'mi_async_lock'.
         */
        CALLB_CPR_EXIT(&cprinfo);
        VFS_RELE(vfsp);		/* release thread's hold */
/*
 * Signal (and wait for) the async manager thread to clean up and go away.
 */
nfs4_async_manager_stop(vfs_t *vfsp)

        mntinfo4_t *mi = VFTOMI4(vfsp);

        mutex_enter(&mi->mi_async_lock);
        mutex_enter(&mi->mi_lock);
        mi->mi_flags |= MI4_ASYNC_MGR_STOP;
        mutex_exit(&mi->mi_lock);
        cv_broadcast(&mi->mi_async_reqs_cv);
        /*
         * Wait for the async manager thread to die.
         */
        while (mi->mi_manager_thread != NULL)
                cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
        mutex_exit(&mi->mi_async_lock);
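/*
 * Queue an asynchronous readahead request for the given block.  If the
 * request can't be queued (no memory, a pending lock operation, or async
 * i/o disabled for the mount), the readahead is simply skipped.
 */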
nfs4_async_readahead(vnode_t *vp, uoff_t blkoff, caddr_t addr,
    struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
    uoff_t, caddr_t, struct seg *, cred_t *))

        struct nfs4_async_reqs *args;

        ASSERT(rp->r_freef == NULL);

        /*
         * If addr falls in a different segment, don't bother doing readahead.
         */
        if (addr >= seg->s_base + seg->s_size)

        /*
         * If we can't allocate a request structure, punt on the readahead.
         */
        if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)

        /*
         * If a lock operation is pending, don't initiate any new
         * readaheads.  Otherwise, bump r_count to indicate the new
         */
        if (!nfs_rw_tryenter(&rp->r_lkserlock, RW_READER)) {
                kmem_free(args, sizeof (*args));
        mutex_enter(&rp->r_statelock);
        mutex_exit(&rp->r_statelock);
        nfs_rw_exit(&rp->r_lkserlock);

        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_READ_AHEAD;
        args->a_nfs4_readahead = readahead;
        args->a_nfs4_blkoff = blkoff;
        args->a_nfs4_seg = seg;
        args->a_nfs4_addr = addr;

        mutex_enter(&mi->mi_async_lock);

        /*
         * If asyncio has been disabled, don't bother readahead.
         */
        if (mi->mi_max_threads == 0) {
                mutex_exit(&mi->mi_async_lock);

        /*
         * Link request structure into the async list and
         * wakeup async thread to do the i/o.
         */
        if (mi->mi_async_reqs[NFS4_READ_AHEAD] == NULL) {
                mi->mi_async_reqs[NFS4_READ_AHEAD] = args;
                mi->mi_async_tail[NFS4_READ_AHEAD] = args;
                mi->mi_async_tail[NFS4_READ_AHEAD]->a_next = args;
                mi->mi_async_tail[NFS4_READ_AHEAD] = args;

        if (mi->mi_io_kstats) {
                mutex_enter(&mi->mi_lock);
                kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                mutex_exit(&mi->mi_lock);

        mi->mi_async_req_count++;
        ASSERT(mi->mi_async_req_count != 0);
        cv_signal(&mi->mi_async_reqs_cv);
        mutex_exit(&mi->mi_async_lock);

        mutex_enter(&rp->r_statelock);
        cv_broadcast(&rp->r_cv);
        mutex_exit(&rp->r_statelock);

        kmem_free(args, sizeof (*args));
nfs4_async_start(struct vfs *vfsp)

        nfs4_async_common_start(vfsp, NFS4_ASYNC_QUEUE);

nfs4_async_pgops_start(struct vfs *vfsp)

        nfs4_async_common_start(vfsp, NFS4_ASYNC_PGOPS_QUEUE);
/*
 * The async queues for each mounted file system are arranged as a
 * set of queues, one for each async i/o type.  Requests are taken
 * from the queues in a round-robin fashion.  A number of consecutive
 * requests are taken from each queue before moving on to the next
 * queue.  This functionality may allow the NFS Version 2 server to do
 * write clustering, even if the client is mixing writes and reads
 * because it will take multiple write requests from the queue
 * before processing any of the other async i/o types.
 *
 * XXX The nfs4_async_common_start thread is unsafe in the light of the present
 * model defined by cpr to suspend the system.  Specifically over the
 * wire calls are cpr-unsafe.  The thread should be reevaluated in
 * case of future updates to the cpr model.
 */
nfs4_async_common_start(struct vfs *vfsp, int async_queue)

        struct nfs4_async_reqs *args;
        mntinfo4_t *mi = VFTOMI4(vfsp);
        clock_t time_left = 1;
        callb_cpr_t cprinfo;
        extern int nfs_async_timeout;
        kcondvar_t *async_work_cv;

        if (async_queue == NFS4_ASYNC_QUEUE) {
                async_types = NFS4_ASYNC_TYPES;
                async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_QUEUE];
                async_types = NFS4_ASYNC_PGOPS_TYPES;
                async_work_cv = &mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE];

        /*
         * Dynamic initialization of nfs_async_timeout to allow nfs to be
         * built in an implementation independent manner.
         */
        if (nfs_async_timeout == -1)
                nfs_async_timeout = NFS_ASYNC_TIMEOUT;

        CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");

        mutex_enter(&mi->mi_async_lock);
        /*
         * Find the next queue containing an entry.  We start
         * at the current queue pointer and then round robin
         * through all of them until we either find a non-empty
         * queue or have looked through all of them.
         */
        for (i = 0; i < async_types; i++) {
                args = *mi->mi_async_curr[async_queue];
                mi->mi_async_curr[async_queue]++;
                if (mi->mi_async_curr[async_queue] ==
                    &mi->mi_async_reqs[async_types]) {
                        mi->mi_async_curr[async_queue] =
                            &mi->mi_async_reqs[0];
        /*
         * If we didn't find an entry, then block until woken up
         * again and then look through the queues again.
         */
                /*
                 * Exiting is considered to be safe for CPR as well
                 */
                CALLB_CPR_SAFE_BEGIN(&cprinfo);

                /*
                 * Wakeup thread waiting to unmount the file
                 * system only if all async threads are inactive.
                 *
                 * If we've timed-out and there's nothing to do,
                 * then get rid of this thread.
                 */
                if (mi->mi_max_threads == 0 || time_left <= 0) {
                        --mi->mi_threads[async_queue];
                        if (mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
                            mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0)
                                cv_signal(&mi->mi_async_cv);
                        CALLB_CPR_EXIT(&cprinfo);
                        VFS_RELE(vfsp);		/* release thread's hold */

                time_left = cv_reltimedwait(async_work_cv,
                    &mi->mi_async_lock, nfs_async_timeout,

                CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

                /*
                 * Remove the request from the async queue and then
                 * update the current async request queue pointer.  If
                 * the current queue is empty or we have removed enough
                 * consecutive entries from it, then reset the counter
                 * for this queue and then move the current pointer to
                 */
                *mi->mi_async_curr[async_queue] = args->a_next;
                if (*mi->mi_async_curr[async_queue] == NULL ||
                    --mi->mi_async_clusters[args->a_io] == 0) {
                        mi->mi_async_clusters[args->a_io] =
                            mi->mi_async_init_clusters;
                        mi->mi_async_curr[async_queue]++;
                        if (mi->mi_async_curr[async_queue] ==
                            &mi->mi_async_reqs[async_types]) {
                                mi->mi_async_curr[async_queue] =
                                    &mi->mi_async_reqs[0];

                if (args->a_io != NFS4_INACTIVE && mi->mi_io_kstats) {
                        mutex_enter(&mi->mi_lock);
                        kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
                        mutex_exit(&mi->mi_lock);

                mutex_exit(&mi->mi_async_lock);
                /*
                 * Obtain arguments from the async request structure.
                 */
                if (args->a_io == NFS4_READ_AHEAD && mi->mi_max_threads > 0) {
                        (*args->a_nfs4_readahead)(args->a_vp,
                            args->a_nfs4_blkoff, args->a_nfs4_addr,
                            args->a_nfs4_seg, args->a_cred);
                } else if (args->a_io == NFS4_PUTAPAGE) {
                        (void) (*args->a_nfs4_putapage)(args->a_vp,
                            args->a_nfs4_pp, args->a_nfs4_off,
                            args->a_nfs4_len, args->a_nfs4_flags,
                } else if (args->a_io == NFS4_PAGEIO) {
                        (void) (*args->a_nfs4_pageio)(args->a_vp,
                            args->a_nfs4_pp, args->a_nfs4_off,
                            args->a_nfs4_len, args->a_nfs4_flags,
                } else if (args->a_io == NFS4_READDIR) {
                        (void) ((*args->a_nfs4_readdir)(args->a_vp,
                            args->a_nfs4_rdc, args->a_cred));
                } else if (args->a_io == NFS4_COMMIT) {
                        (*args->a_nfs4_commit)(args->a_vp, args->a_nfs4_plist,
                            args->a_nfs4_offset, args->a_nfs4_count,
                } else if (args->a_io == NFS4_INACTIVE) {
                        nfs4_inactive_otw(args->a_vp, args->a_cred);

                /*
                 * Now, release the vnode and free the credentials
                 */
                free_async_args4(args);
                /*
                 * Reacquire the mutex because it will be needed above.
                 */
                mutex_enter(&mi->mi_async_lock);
/*
 * nfs4_inactive_thread - look for vnodes that need over-the-wire calls as
 * part of fop_inactive.
 */
nfs4_inactive_thread(mntinfo4_t *mi)

        struct nfs4_async_reqs *args;
        callb_cpr_t cprinfo;
        vfs_t *vfsp = mi->mi_vfsp;

        CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
            "nfs4_inactive_thread");

        mutex_enter(&mi->mi_async_lock);
        args = mi->mi_async_reqs[NFS4_INACTIVE];
                mutex_enter(&mi->mi_lock);
                /*
                 * We don't want to exit until the async manager is done
                 * with its work; hence the check for mi_manager_thread
                 *
                 * The async manager thread will cv_broadcast() on
                 * mi_inact_req_cv when it's done, at which point we'll
                 */
                if (mi->mi_manager_thread == NULL)
                mi->mi_flags |= MI4_INACTIVE_IDLE;
                mutex_exit(&mi->mi_lock);
                cv_signal(&mi->mi_async_cv);
                CALLB_CPR_SAFE_BEGIN(&cprinfo);
                cv_wait(&mi->mi_inact_req_cv, &mi->mi_async_lock);
                CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
                mutex_exit(&mi->mi_async_lock);

                mutex_enter(&mi->mi_lock);
                mi->mi_flags &= ~MI4_INACTIVE_IDLE;
                mutex_exit(&mi->mi_lock);
                mi->mi_async_reqs[NFS4_INACTIVE] = args->a_next;
                mutex_exit(&mi->mi_async_lock);
                nfs4_inactive_otw(args->a_vp, args->a_cred);
                crfree(args->a_cred);
                kmem_free(args, sizeof (*args));

        mutex_exit(&mi->mi_lock);
        mi->mi_inactive_thread = NULL;
        cv_signal(&mi->mi_async_cv);
        /*
         * There is no explicit call to mutex_exit(&mi->mi_async_lock) since
         * CALLB_CPR_EXIT is actually responsible for releasing 'mi_async_lock'.
         */
        CALLB_CPR_EXIT(&cprinfo);

        NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
            "nfs4_inactive_thread exiting for vfs %p\n", (void *)vfsp));
/*
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete; nfs4_async_stop_sig() without interruptibility.
 */
nfs4_async_stop(struct vfs *vfsp)

        mntinfo4_t *mi = VFTOMI4(vfsp);

        /*
         * Wait for all outstanding async operations to complete and for
         * worker threads to exit.
         */
        mutex_enter(&mi->mi_async_lock);
        mi->mi_max_threads = 0;
        NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
        while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
            mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0)
                cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);

        /*
         * Wait for the inactive thread to finish doing what it's doing.  It
         * won't exit until the last reference to the vfs_t goes away.
         */
        if (mi->mi_inactive_thread != NULL) {
                mutex_enter(&mi->mi_lock);
                while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
                    (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
                        mutex_exit(&mi->mi_lock);
                        cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
                        mutex_enter(&mi->mi_lock);
                mutex_exit(&mi->mi_lock);
        mutex_exit(&mi->mi_async_lock);
/*
 * nfs_async_stop_sig:
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete.  If a signal is delivered we will abort and return non-zero;
 * otherwise return 0.  Since this routine is called from nfs4_unmount, we
 * need to make it interruptible.
 */
nfs4_async_stop_sig(struct vfs *vfsp)

        mntinfo4_t *mi = VFTOMI4(vfsp);
        bool_t intr = FALSE;

        /*
         * Wait for all outstanding putpage operations to complete and for
         * worker threads to exit.
         */
        mutex_enter(&mi->mi_async_lock);
        omax = mi->mi_max_threads;
        mi->mi_max_threads = 0;
        NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
        while (mi->mi_threads[NFS4_ASYNC_QUEUE] != 0 ||
            mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] != 0) {
                if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock)) {

        /*
         * Wait for the inactive thread to finish doing what it's doing.  It
         * won't exit until the last reference to the vfs_t goes away.
         */
        if (mi->mi_inactive_thread != NULL) {
                mutex_enter(&mi->mi_lock);
                while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
                    (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
                        mutex_exit(&mi->mi_lock);
                        if (!cv_wait_sig(&mi->mi_async_cv,
                            &mi->mi_async_lock)) {
                        mutex_enter(&mi->mi_lock);
                mutex_exit(&mi->mi_lock);
        mi->mi_max_threads = omax;
        mutex_exit(&mi->mi_async_lock);
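/*
 * Queue an asynchronous putpage request.  If the request can't be queued
 * (no memory, or async i/o disabled for the mount), fall back to a
 * synchronous putpage in the caller's context, except for pageout/fsflush
 * and cross-zone callers, where the pages are simply re-marked dirty via
 * pvn_write_done(B_ERROR).
 */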
nfs4_async_putapage(vnode_t *vp, page_t *pp, uoff_t off, size_t len,
    int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
    uoff_t, size_t, int, cred_t *))

        struct nfs4_async_reqs *args;

        ASSERT(flags & B_ASYNC);
        ASSERT(vp->v_vfsp != NULL);
        ASSERT(rp->r_count > 0);

        /*
         * If we can't allocate a request structure, do the putpage
         * operation synchronously in this thread's context.
         */
        if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)

        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_PUTAPAGE;
        args->a_nfs4_putapage = putapage;
        args->a_nfs4_pp = pp;
        args->a_nfs4_off = off;
        args->a_nfs4_len = (uint_t)len;
        args->a_nfs4_flags = flags;

        mutex_enter(&mi->mi_async_lock);

        /*
         * If asyncio has been disabled, then make a synchronous request.
         * This check is done a second time in case async io was disabled
         * while this thread was blocked waiting for memory pressure to
         * reduce or for the queue to drain.
         */
        if (mi->mi_max_threads == 0) {
                mutex_exit(&mi->mi_async_lock);
                kmem_free(args, sizeof (*args));

        /*
         * Link request structure into the async list and
         * wakeup async thread to do the i/o.
         */
        if (mi->mi_async_reqs[NFS4_PUTAPAGE] == NULL) {
                mi->mi_async_reqs[NFS4_PUTAPAGE] = args;
                mi->mi_async_tail[NFS4_PUTAPAGE] = args;
                mi->mi_async_tail[NFS4_PUTAPAGE]->a_next = args;
                mi->mi_async_tail[NFS4_PUTAPAGE] = args;

        mutex_enter(&rp->r_statelock);
        mutex_exit(&rp->r_statelock);

        if (mi->mi_io_kstats) {
                mutex_enter(&mi->mi_lock);
                kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                mutex_exit(&mi->mi_lock);

        mi->mi_async_req_count++;
        ASSERT(mi->mi_async_req_count != 0);
        cv_signal(&mi->mi_async_reqs_cv);
        mutex_exit(&mi->mi_async_lock);

        if (curproc == proc_pageout || curproc == proc_fsflush) {
                /*
                 * If we get here in the context of the pageout/fsflush,
                 * or we have run out of memory or we're attempting to
                 * unmount we refuse to do a sync write, because this may
                 * hang pageout/fsflush and the machine.  In this case,
                 * we just re-mark the page as dirty and punt on the page.
                 *
                 * Make sure B_FORCE isn't set.  We can re-mark the
                 * pages as dirty and unlock the pages in one swoop by
                 * passing in B_ERROR to pvn_write_done().  However,
                 * we should make sure B_FORCE isn't set - we don't
                 * want the page tossed before it gets written out.
                 */
                if (flags & B_FORCE)
                        flags &= ~(B_INVAL | B_FORCE);
                pvn_write_done(pp, flags | B_ERROR);

        if (nfs_zone() != mi->mi_zone) {
                /*
                 * So this was a cross-zone sync putpage.
                 *
                 * We pass in B_ERROR to pvn_write_done() to re-mark the pages
                 * as dirty and unlock them.
                 *
                 * We don't want to clear B_FORCE here as the caller presumably
                 * knows what they're doing if they set it.
                 */
                pvn_write_done(pp, flags | B_ERROR);

        return ((*putapage)(vp, pp, off, len, flags, cr));
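/*
 * Queue an asynchronous pageio request.  Like nfs4_async_putpage(), fall
 * back to synchronous i/o when the request can't be queued, except that
 * reads are simply failed back via pvn_read_done(B_ERROR) and writes from
 * pageout/fsflush or another zone are re-marked dirty via pvn_write_done().
 */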
nfs4_async_pageio(vnode_t *vp, page_t *pp, uoff_t io_off, size_t io_len,
    int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, uoff_t,
    size_t, int, cred_t *))

        struct nfs4_async_reqs *args;

        ASSERT(flags & B_ASYNC);
        ASSERT(vp->v_vfsp != NULL);
        ASSERT(rp->r_count > 0);

        /*
         * If we can't allocate a request structure, do the pageio
         * request synchronously in this thread's context.
         */
        if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)

        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_PAGEIO;
        args->a_nfs4_pageio = pageio;
        args->a_nfs4_pp = pp;
        args->a_nfs4_off = io_off;
        args->a_nfs4_len = (uint_t)io_len;
        args->a_nfs4_flags = flags;

        mutex_enter(&mi->mi_async_lock);

        /*
         * If asyncio has been disabled, then make a synchronous request.
         * This check is done a second time in case async io was disabled
         * while this thread was blocked waiting for memory pressure to
         * reduce or for the queue to drain.
         */
        if (mi->mi_max_threads == 0) {
                mutex_exit(&mi->mi_async_lock);
                kmem_free(args, sizeof (*args));

        /*
         * Link request structure into the async list and
         * wakeup async thread to do the i/o.
         */
        if (mi->mi_async_reqs[NFS4_PAGEIO] == NULL) {
                mi->mi_async_reqs[NFS4_PAGEIO] = args;
                mi->mi_async_tail[NFS4_PAGEIO] = args;
                mi->mi_async_tail[NFS4_PAGEIO]->a_next = args;
                mi->mi_async_tail[NFS4_PAGEIO] = args;

        mutex_enter(&rp->r_statelock);
        mutex_exit(&rp->r_statelock);

        if (mi->mi_io_kstats) {
                mutex_enter(&mi->mi_lock);
                kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                mutex_exit(&mi->mi_lock);

        mi->mi_async_req_count++;
        ASSERT(mi->mi_async_req_count != 0);
        cv_signal(&mi->mi_async_reqs_cv);
        mutex_exit(&mi->mi_async_lock);

        /*
         * If we can't do it ASYNC, for reads we do nothing (but cleanup
         * the page list), for writes we do it synchronously, except for
         * proc_pageout/proc_fsflush as described below.
         */
        if (flags & B_READ) {
                pvn_read_done(pp, flags | B_ERROR);

        if (curproc == proc_pageout || curproc == proc_fsflush) {
                /*
                 * If we get here in the context of the pageout/fsflush,
                 * we refuse to do a sync write, because this may hang
                 * pageout/fsflush (and the machine).  In this case, we just
                 * re-mark the page as dirty and punt on the page.
                 *
                 * Make sure B_FORCE isn't set.  We can re-mark the
                 * pages as dirty and unlock the pages in one swoop by
                 * passing in B_ERROR to pvn_write_done().  However,
                 * we should make sure B_FORCE isn't set - we don't
                 * want the page tossed before it gets written out.
                 */
                if (flags & B_FORCE)
                        flags &= ~(B_INVAL | B_FORCE);
                pvn_write_done(pp, flags | B_ERROR);

        if (nfs_zone() != mi->mi_zone) {
                /*
                 * So this was a cross-zone sync pageio.  We pass in B_ERROR
                 * to pvn_write_done() to re-mark the pages as dirty and unlock
                 *
                 * We don't want to clear B_FORCE here as the caller presumably
                 * knows what they're doing if they set it.
                 */
                pvn_write_done(pp, flags | B_ERROR);

        return ((*pageio)(vp, pp, io_off, io_len, flags, cr));
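/*
 * Queue an asynchronous readdir for the given readdir cache entry.  If the
 * request can't be queued, reset the entry to RDDIRREQ so a later caller
 * will fill it, and release our hold on it.
 */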
nfs4_async_readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr,
    int (*readdir)(vnode_t *, rddir4_cache *, cred_t *))

        struct nfs4_async_reqs *args;

        ASSERT(rp->r_freef == NULL);

        /*
         * If we can't allocate a request structure, skip the readdir.
         */
        if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)

        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_READDIR;
        args->a_nfs4_readdir = readdir;
        args->a_nfs4_rdc = rdc;

        mutex_enter(&mi->mi_async_lock);

        /*
         * If asyncio has been disabled, then skip this request
         */
        if (mi->mi_max_threads == 0) {
                mutex_exit(&mi->mi_async_lock);
                kmem_free(args, sizeof (*args));

        /*
         * Link request structure into the async list and
         * wakeup async thread to do the i/o.
         */
        if (mi->mi_async_reqs[NFS4_READDIR] == NULL) {
                mi->mi_async_reqs[NFS4_READDIR] = args;
                mi->mi_async_tail[NFS4_READDIR] = args;
                mi->mi_async_tail[NFS4_READDIR]->a_next = args;
                mi->mi_async_tail[NFS4_READDIR] = args;

        mutex_enter(&rp->r_statelock);
        mutex_exit(&rp->r_statelock);

        if (mi->mi_io_kstats) {
                mutex_enter(&mi->mi_lock);
                kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                mutex_exit(&mi->mi_lock);

        mi->mi_async_req_count++;
        ASSERT(mi->mi_async_req_count != 0);
        cv_signal(&mi->mi_async_reqs_cv);
        mutex_exit(&mi->mi_async_lock);

        mutex_enter(&rp->r_statelock);
        rdc->entries = NULL;
        /*
         * Indicate that no one is trying to fill this entry and
         * it still needs to be filled.
         */
        rdc->flags &= ~RDDIR;
        rdc->flags |= RDDIRREQ;
        rddir4_cache_rele(rp, rdc);
        mutex_exit(&rp->r_statelock);
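/*
 * Queue an asynchronous commit of the given page list.  If the request
 * can't be queued, the commit is done synchronously in the caller's
 * context, except for pageout/fsflush and cross-zone callers, where the
 * pages are just re-marked C_COMMIT.
 */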
nfs4_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
    cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3,

        struct nfs4_async_reqs *args;

        /*
         * If we can't allocate a request structure, do the commit
         * operation synchronously in this thread's context.
         */
        if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)

        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_COMMIT;
        args->a_nfs4_commit = commit;
        args->a_nfs4_plist = plist;
        args->a_nfs4_offset = offset;
        args->a_nfs4_count = count;

        mutex_enter(&mi->mi_async_lock);

        /*
         * If asyncio has been disabled, then make a synchronous request.
         * This check is done a second time in case async io was disabled
         * while this thread was blocked waiting for memory pressure to
         * reduce or for the queue to drain.
         */
        if (mi->mi_max_threads == 0) {
                mutex_exit(&mi->mi_async_lock);
                kmem_free(args, sizeof (*args));

        /*
         * Link request structure into the async list and
         * wakeup async thread to do the i/o.
         */
        if (mi->mi_async_reqs[NFS4_COMMIT] == NULL) {
                mi->mi_async_reqs[NFS4_COMMIT] = args;
                mi->mi_async_tail[NFS4_COMMIT] = args;
                mi->mi_async_tail[NFS4_COMMIT]->a_next = args;
                mi->mi_async_tail[NFS4_COMMIT] = args;

        mutex_enter(&rp->r_statelock);
        mutex_exit(&rp->r_statelock);

        if (mi->mi_io_kstats) {
                mutex_enter(&mi->mi_lock);
                kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
                mutex_exit(&mi->mi_lock);

        mi->mi_async_req_count++;
        ASSERT(mi->mi_async_req_count != 0);
        cv_signal(&mi->mi_async_reqs_cv);
        mutex_exit(&mi->mi_async_lock);

        if (curproc == proc_pageout || curproc == proc_fsflush ||
            nfs_zone() != mi->mi_zone) {
                while (plist != NULL) {
                        page_sub(&plist, pp);
                        pp->p_fsdata = C_COMMIT;

        (*commit)(vp, plist, offset, count, cr);
/*
 * nfs4_async_inactive - hand off a fop_inactive call to a thread.  The
 * reference to the vnode is handed over to the thread; the caller should
 * no longer refer to the vnode.
 *
 * Unlike most of the async routines, this handoff is needed for
 * correctness reasons, not just performance.  So doing operations in the
 * context of the current thread is not an option.
 */
nfs4_async_inactive(vnode_t *vp, cred_t *cr)

        struct nfs4_async_reqs *args;
        boolean_t signal_inactive_thread = B_FALSE;

        args = kmem_alloc(sizeof (*args), KM_SLEEP);
        args->a_next = NULL;
        args->a_queuer = curthread;
        args->a_io = NFS4_INACTIVE;

        /*
         * Note that we don't check mi->mi_max_threads here, since we
         * *need* to get rid of this vnode regardless of whether someone
         * set nfs4_max_threads to zero in /etc/system.
         *
         * The manager thread knows about this and is willing to create
         * at least one thread to accommodate us.
         */
        mutex_enter(&mi->mi_async_lock);
        if (mi->mi_inactive_thread == NULL) {
                vnode_t *unldvp = NULL;

                mutex_exit(&mi->mi_async_lock);
                /*
                 * We just need to free up the memory associated with the
                 * vnode, which can be safely done from within the current
                 */
                crfree(cr);	/* drop our reference */
                kmem_free(args, sizeof (*args));

                mutex_enter(&rp->r_statelock);
                if (rp->r_unldvp != NULL) {
                        unldvp = rp->r_unldvp;
                        rp->r_unldvp = NULL;
                        unlname = rp->r_unlname;
                        rp->r_unlname = NULL;
                        unlcred = rp->r_unlcred;
                        rp->r_unlcred = NULL;
                mutex_exit(&rp->r_statelock);

                /*
                 * No need to explicitly throw away any cached pages.  The
                 * eventual r4inactive() will attempt a synchronous
                 * fop_putpage() which will immediately fail since the request
                 * is coming from the wrong zone, and then will proceed to call
                 * nfs4_invalidate_pages() which will clean things up for us.
                 *
                 * Throw away the delegation here so rp4_addfree()'s attempt to
                 * return any existing delegations becomes a no-op.
                 */
                if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
                        (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER,
                        (void) nfs4delegreturn(rp, NFS4_DR_DISCARD);
                        nfs_rw_exit(&mi->mi_recovlock);
                nfs4_clear_open_streams(rp);

                rp4_addfree(rp, cr);
                if (unldvp != NULL) {
                        kmem_free(unlname, MAXNAMELEN);

        if (mi->mi_manager_thread == NULL) {
                /*
                 * We want to talk to the inactive thread.
                 */
                signal_inactive_thread = B_TRUE;

        /*
         * Enqueue the vnode and wake up either the special thread (empty
         * list) or an async thread.
         */
        if (mi->mi_async_reqs[NFS4_INACTIVE] == NULL) {
                mi->mi_async_reqs[NFS4_INACTIVE] = args;
                mi->mi_async_tail[NFS4_INACTIVE] = args;
                signal_inactive_thread = B_TRUE;
                mi->mi_async_tail[NFS4_INACTIVE]->a_next = args;
                mi->mi_async_tail[NFS4_INACTIVE] = args;
        if (signal_inactive_thread) {
                cv_signal(&mi->mi_inact_req_cv);
                mi->mi_async_req_count++;
                ASSERT(mi->mi_async_req_count != 0);
                cv_signal(&mi->mi_async_reqs_cv);

        mutex_exit(&mi->mi_async_lock);
int
writerp4(rnode4_t *rp, caddr_t base, int tcount, struct uio *uio,
    int pgcreated)
{
	int pagecreate;
	int n;
	int saved_n;
	caddr_t saved_base;
	uoff_t offset;
	int error;
	int sm_error;
	vnode_t *vp = RTOV(rp);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/*
	 * Move bytes in at most PAGESIZE chunks. We must avoid
	 * spanning pages in uiomove() because page faults may cause
	 * the cache to be invalidated out from under us. The r_size is not
	 * updated until after the uiomove. If we push the last page of a
	 * file before r_size is correct, we will lose the data written past
	 * the current (and invalid) r_size.
	 */
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/*
		 * n is the number of bytes required to satisfy the request
		 * or the number of bytes to fill out the page.
		 */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Check to see if we can skip reading in the page
		 * and just allocate the memory.  We can do this
		 * if we are going to rewrite the entire mapping
		 * or if we are going to write to or beyond the current
		 * end of file from the beginning of the mapping.
		 *
		 * The read of r_size is now protected by r_statelock.
		 */
		mutex_enter(&rp->r_statelock);
		/*
		 * When pgcreated is nonzero the caller has already done
		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
		 * segkpm this means we already have at least one page
		 * created and mapped at base.
		 */
		pagecreate = pgcreated ||
		    ((offset & PAGEOFFSET) == 0 &&
		    (n == PAGESIZE || ((offset + n) >= rp->r_size)));

		mutex_exit(&rp->r_statelock);

		if (!vpm_enable && pagecreate) {
			/*
			 * The last argument tells segmap_pagecreate() to
			 * always lock the page, as opposed to sometimes
			 * returning with the page locked. This way we avoid a
			 * fault on the ensuing uiomove(), but also
			 * more importantly (to fix bug 1094402) we can
			 * call segmap_fault() to unlock the page in all
			 * cases. An alternative would be to modify
			 * segmap_pagecreate() to tell us when it is
			 * locking a page, but that's a fairly major
			 * interface change.
			 */
			(void) segmap_pagecreate(segkmap, base,
			    (uint_t)n, 1);
			saved_base = base;
			saved_n = n;
		}

		/*
		 * The number of bytes of data in the last page can not
		 * be accurately determined while the page is being
		 * uiomove'd to and the size of the file being updated.
		 * Thus, inform threads which need to know accurately
		 * how much data is in the last page of the file.  They
		 * will not do the i/o immediately, but will arrange for
		 * the i/o to happen later when this modify operation
		 * will have finished.
		 */
		ASSERT(!(rp->r_flags & R4MODINPROGRESS));
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4MODINPROGRESS;
		rp->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&rp->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
			    !pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * r_size is the maximum number of
		 * bytes known to be in the file.
		 * Make sure it is at least as high as the
		 * first unwritten byte pointed to by uio_loffset.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_size < uio->uio_loffset)
			rp->r_size = uio->uio_loffset;
		rp->r_flags &= ~R4MODINPROGRESS;
		rp->r_flags |= R4DIRTY;
		mutex_exit(&rp->r_statelock);

		/* n = # of bytes written */
		n = (int)(uio->uio_loffset - offset);

		if (!vpm_enable)
			base += n;

		tcount -= n;

		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens on most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
		if (!vpm_enable && pagecreate) {
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller is responsible for this page,
				 * it was not created in this loop.
				 */
				pgcreated = 0;
			} else {
				/*
				 * For bug 1094402: segmap_pagecreate locks
				 * page. Unlock it. This also unlocks the
				 * pages allocated by page_create_va() in
				 * segmap_pagecreate().
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
				    saved_base, saved_n,
				    F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
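/*
 * Rough sketch of how a caller is expected to drive writerp4() (the segmap
 * calls below are the generic VM segmap interface; the actual callers live
 * in the vnode ops code and may differ in detail):
 *
 *	base = segmap_getmapflt(segkmap, vp, off, n, !pagecreate, S_WRITE);
 *	error = writerp4(rp, base + pgoff, n, uiop, pagecreated);
 *	(void) segmap_release(segkmap, base, flags);
 *
 * writerp4() only copies data into the mapping and maintains r_size and the
 * R4DIRTY/R4MODINPROGRESS bookkeeping; the dirty pages are pushed to the
 * server later by the putpage path.
 */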
int
nfs4_putpages(vnode_t *vp, uoff_t off, size_t len, int flags, cred_t *cr)
{
	int error;
	rnode4_t *rp;
	page_t *pp;
	uoff_t eoff;
	uoff_t io_off;
	size_t io_len;
	int err;
	int rdirty = 0;

	rp = VTOR4(vp);
	ASSERT(rp->r_count > 0);

	if (!nfs4_has_pages(vp))
		return (0);

	ASSERT(vp->v_type != VCHR);

	/*
	 * If R4OUTOFSPACE is set, then all writes turn into B_INVAL
	 * writes.  B_FORCE is set to force the VM system to actually
	 * invalidate the pages, even if the i/o failed.  The pages
	 * need to get invalidated because they can't be written out
	 * because there isn't any space left on either the server's
	 * file system or in the user's disk quota.  The B_FREE bit
	 * is cleared to avoid confusion as to whether this is a
	 * request to place the page on the freelist or to destroy
	 * it.
	 */
	if ((rp->r_flags & R4OUTOFSPACE) ||
	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;

	if (len == 0) {
		/*
		 * If doing a full file synchronous operation, then clear
		 * the R4DIRTY bit.  If a page gets dirtied while the flush
		 * is happening, then R4DIRTY will get set again.  The
		 * R4DIRTY bit must get cleared before the flush so that
		 * we don't lose this information.
		 *
		 * If there are no full file async write operations
		 * pending and the R4DIRTY bit is set, clear it.
		 */
		if (off == (uoff_t)0 &&
		    !(flags & B_ASYNC) &&
		    (rp->r_flags & R4DIRTY)) {
			mutex_enter(&rp->r_statelock);
			rdirty = (rp->r_flags & R4DIRTY);
			rp->r_flags &= ~R4DIRTY;
			mutex_exit(&rp->r_statelock);
		} else if (flags & B_ASYNC && off == 0) {
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4DIRTY && rp->r_awcount == 0) {
				rdirty = (rp->r_flags & R4DIRTY);
				rp->r_flags &= ~R4DIRTY;
			}
			mutex_exit(&rp->r_statelock);
		} else
			rdirty = 0;

		/*
		 * Search the entire vp list for pages >= off, and flush
		 * the dirty pages.
		 */
		error = pvn_vplist_dirty(vp, off, rp->r_putapage,
		    flags, cr);

		/*
		 * If an error occurred and the file was marked as dirty
		 * before and we aren't forcibly invalidating pages, then
		 * reset the R4DIRTY flag.
		 */
		if (error && rdirty &&
		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
			mutex_enter(&rp->r_statelock);
			rp->r_flags |= R4DIRTY;
			mutex_exit(&rp->r_statelock);
		}
	} else {
		/*
		 * Do a range from [off...off + len) looking for pages
		 * to deal with.
		 */
		error = 0;
		io_len = 0;
		eoff = off + len;
		mutex_enter(&rp->r_statelock);
		for (io_off = off; io_off < eoff && io_off < rp->r_size;
		    io_off += io_len) {
			mutex_exit(&rp->r_statelock);
			/*
			 * If we are not invalidating, synchronously
			 * freeing or writing pages use the routine
			 * page_lookup_nowait() to prevent reclaiming
			 * them from the free list.
			 */
			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
				pp = page_lookup(&vp->v_object, io_off,
				    (flags & (B_INVAL | B_FREE)) ?
				    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(&vp->v_object, io_off,
				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL || !pvn_getdirty(pp, flags))
				io_len = PAGESIZE;
			else {
				err = (*rp->r_putapage)(vp, pp, &io_off,
				    &io_len, flags, cr);
				if (!error)
					error = err;
				/*
				 * "io_off" and "io_len" are returned as
				 * the range of pages we actually wrote.
				 * This allows us to skip ahead more quickly
				 * since several pages may've been dealt
				 * with by this iteration of the loop.
				 */
			}
			mutex_enter(&rp->r_statelock);
		}
		mutex_exit(&rp->r_statelock);
	}

	return (error);
}
void
nfs4_invalidate_pages(vnode_t *vp, uoff_t off, cred_t *cr)
{
	rnode4_t *rp;

	rp = VTOR4(vp);
	if (IS_SHADOW(vp, rp))
		vp = RTOV4(rp);
	mutex_enter(&rp->r_statelock);
	while (rp->r_flags & R4TRUNCATE)
		cv_wait(&rp->r_cv, &rp->r_statelock);
	rp->r_flags |= R4TRUNCATE;
	if (off == (uoff_t)0) {
		rp->r_flags &= ~R4DIRTY;
		if (!(rp->r_flags & R4STALE))
			rp->r_error = 0;
	}
	rp->r_truncaddr = off;
	mutex_exit(&rp->r_statelock);
	(void) pvn_vplist_dirty(vp, off, rp->r_putapage,
	    B_INVAL | B_TRUNC, cr);
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4TRUNCATE;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
}
static int
nfs4_mnt_kstat_update(kstat_t *ksp, int rw)
{
	struct mntinfo_kstat *mik;
	struct vfs *vfsp;
	mntinfo4_t *mi;

	/* this is a read-only kstat. Bail out on a write */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	/*
	 * We don't want to wait here as kstat_chain_lock could be held by
	 * dounmount(). dounmount() takes vfs_reflock before the chain lock
	 * and thus could lead to a deadlock.
	 */
	vfsp = (struct vfs *)ksp->ks_private;

	mi = VFTOMI4(vfsp);
	mik = (struct mntinfo_kstat *)ksp->ks_data;

	(void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto);

	mik->mik_vers = (uint32_t)mi->mi_vers;
	mik->mik_flags = mi->mi_flags;
	/*
	 * The sv_secdata holds the flavor the client specifies.
	 * If the client uses default and a security negotiation
	 * occurs, sv_currsec will point to the current flavor
	 * selected from the server flavor list.
	 * sv_currsec is NULL if no security negotiation takes place.
	 */
	mik->mik_secmod = mi->mi_curr_serv->sv_currsec ?
	    mi->mi_curr_serv->sv_currsec->secmod :
	    mi->mi_curr_serv->sv_secdata->secmod;
	mik->mik_curread = (uint32_t)mi->mi_curread;
	mik->mik_curwrite = (uint32_t)mi->mi_curwrite;
	mik->mik_retrans = mi->mi_retrans;
	mik->mik_timeo = mi->mi_timeo;
	mik->mik_acregmin = HR2SEC(mi->mi_acregmin);
	mik->mik_acregmax = HR2SEC(mi->mi_acregmax);
	mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin);
	mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax);
	mik->mik_noresponse = (uint32_t)mi->mi_noresponse;
	mik->mik_failover = (uint32_t)mi->mi_failover;
	mik->mik_remap = (uint32_t)mi->mi_remap;

	(void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname);

	return (0);
}
void
nfs4_mnt_kstat_init(struct vfs *vfsp)
{
	mntinfo4_t *mi = VFTOMI4(vfsp);

	/*
	 * PSARC 2001/697 Contract Private Interface
	 * All nfs kstats are under SunMC contract
	 * Please refer to the PSARC listed above and contact
	 * SunMC before making any changes!
	 *
	 * Changes must be reviewed by Solaris File Sharing
	 * Changes must be communicated to contract-2001-697@sun.com
	 */

	mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev),
	    NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id);
	if (mi->mi_io_kstats) {
		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
			kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID);
		mi->mi_io_kstats->ks_lock = &mi->mi_lock;
		kstat_install(mi->mi_io_kstats);
	}

	if ((mi->mi_ro_kstats = kstat_create_zone("nfs",
	    getminor(vfsp->vfs_dev), "mntinfo", "misc", KSTAT_TYPE_RAW,
	    sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id)) != NULL) {
		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
			kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID);
		mi->mi_ro_kstats->ks_update = nfs4_mnt_kstat_update;
		mi->mi_ro_kstats->ks_private = (void *)vfsp;
		kstat_install(mi->mi_ro_kstats);
	}

	nfs4_mnt_recov_kstat_init(vfsp);
}
void
nfs4_write_error(vnode_t *vp, int error, cred_t *cr)
{
	mntinfo4_t *mi = VTOMI4(vp);
	clock_t now = ddi_get_lbolt();

	/*
	 * In case of forced unmount, do not print any messages
	 * since it can flood the console with error messages.
	 */
	if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED)
		return;

	/*
	 * If the mount point is dead, not recoverable, do not
	 * print error messages that can flood the console.
	 */
	if (mi->mi_flags & MI4_RECOV_FAIL)
		return;

	/*
	 * No use in flooding the console with ENOSPC
	 * messages from the same file system.
	 */
	if ((error != ENOSPC && error != EDQUOT) ||
	    now - mi->mi_printftime > 0) {
		zoneid_t zoneid = mi->mi_zone->zone_id;

#ifdef DEBUG
		nfs_perror(error, "NFS%ld write error on host %s: %m.\n",
		    mi->mi_vers, VTOR4(vp)->r_server->sv_hostname, NULL);
#else
		nfs_perror(error, "NFS write error on host %s: %m.\n",
		    VTOR4(vp)->r_server->sv_hostname, NULL);
#endif
		if (error == ENOSPC || error == EDQUOT) {
			zcmn_err(zoneid, CE_CONT,
			    "^File: userid=%d, groupid=%d\n",
			    crgetuid(cr), crgetgid(cr));
			if (crgetuid(curthread->t_cred) != crgetuid(cr) ||
			    crgetgid(curthread->t_cred) != crgetgid(cr)) {
				zcmn_err(zoneid, CE_CONT,
				    "^User: userid=%d, groupid=%d\n",
				    crgetuid(curthread->t_cred),
				    crgetgid(curthread->t_cred));
			}
			mi->mi_printftime = now +
			    nfs_write_error_interval * hz;
		}
		sfh4_printfhandle(VTOR4(vp)->r_fh);
#ifdef DEBUG
		if (error == EACCES) {
			zcmn_err(zoneid, CE_CONT,
			    "nfs_bio: cred is%s kcred\n",
			    cr == kcred ? "" : " not");
		}
#endif
	}
}
/*
 * Return non-zero if the given file can be safely memory mapped.  Locks
 * are safe if whole-file (length and offset are both zero).
 */

#define	SAFE_LOCK(flk)	((flk).l_start == 0 && (flk).l_len == 0)

static int
nfs4_safemap(const vnode_t *vp)
{
	locklist_t	*llp, *next_llp;
	int		safe = 1;
	rnode4_t	*rp = VTOR4(vp);

	ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));

	NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: "
	    "vp = %p", (void *)vp));

	/*
	 * Review all the locks for the vnode, both ones that have been
	 * acquired and ones that are pending.  We assume that
	 * flk_active_locks_for_vp() has merged any locks that can be
	 * merged (so that if a process has the entire file locked, it is
	 * represented as a single lock).
	 *
	 * Note that we can't bail out of the loop if we find a non-safe
	 * lock, because we have to free all the elements in the llp list.
	 * We might be able to speed up this code slightly by not looking
	 * at each lock's l_start and l_len fields once we've found a
	 * non-safe lock.
	 */

	llp = flk_active_locks_for_vp(vp);
	while (llp) {
		NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE,
		    "nfs4_safemap: active lock (%" PRId64 ", %" PRId64 ")",
		    llp->ll_flock.l_start, llp->ll_flock.l_len));
		if (!SAFE_LOCK(llp->ll_flock)) {
			safe = 0;
			NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE,
			    "nfs4_safemap: unsafe active lock (%" PRId64
			    ", %" PRId64 ")", llp->ll_flock.l_start,
			    llp->ll_flock.l_len));
		}
		next_llp = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = next_llp;
	}

	NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: %s",
	    safe ? "safe" : "unsafe"));

	return (safe);
}
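/*
 * For example, a whole-file lock (l_start == 0, l_len == 0) leaves the file
 * safe to map, while a byte-range lock such as (l_start == 0, l_len == 4096)
 * does not; nfs4_lockcompletion() below uses this result to set or clear
 * VNOCACHE on the vnode.
 */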
/*
 * Return whether there is a lost LOCK or LOCKU queued up for the given
 * file that would make an mmap request unsafe.  cf. nfs4_safemap().
 */

bool_t
nfs4_map_lost_lock_conflict(vnode_t *vp)
{
	bool_t conflict = FALSE;
	nfs4_lost_rqst_t *lrp;
	mntinfo4_t *mi = VTOMI4(vp);

	mutex_enter(&mi->mi_lock);
	for (lrp = list_head(&mi->mi_lost_state); lrp != NULL;
	    lrp = list_next(&mi->mi_lost_state, lrp)) {
		if (lrp->lr_op != OP_LOCK && lrp->lr_op != OP_LOCKU)
			continue;
		ASSERT(lrp->lr_vp != NULL);
		if (!fop_cmp(lrp->lr_vp, vp, NULL))
			continue;	/* different file */
		if (!SAFE_LOCK(*lrp->lr_flk)) {
			conflict = TRUE;
			break;
		}
	}

	mutex_exit(&mi->mi_lock);
	return (conflict);
}
/*
 * nfs_lockcompletion:
 *
 * If the vnode has a lock that makes it unsafe to cache the file, mark it
 * as non cachable (set VNOCACHE bit).
 */

void
nfs4_lockcompletion(vnode_t *vp, int cmd)
{
	rnode4_t *rp = VTOR4(vp);

	ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));
	ASSERT(!IS_SHADOW(vp, rp));

	if (cmd == F_SETLK || cmd == F_SETLKW) {

		if (!nfs4_safemap(vp)) {
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VNOCACHE;
			mutex_exit(&vp->v_lock);
		} else {
			mutex_enter(&vp->v_lock);
			vp->v_flag &= ~VNOCACHE;
			mutex_exit(&vp->v_lock);
		}
	}
	/*
	 * The cached attributes of the file are stale after acquiring
	 * the lock on the file. They were updated when the file was
	 * opened, but not updated when the lock was acquired. Therefore the
	 * cached attributes are invalidated after the lock is obtained.
	 */
	PURGE_ATTRCACHE4(vp);
}
/* ARGSUSED */
static void *
nfs4_mi_init(zoneid_t zoneid)
{
	struct mi4_globals *mig;

	mig = kmem_alloc(sizeof (*mig), KM_SLEEP);
	mutex_init(&mig->mig_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&mig->mig_list, sizeof (mntinfo4_t),
	    offsetof(mntinfo4_t, mi_zone_node));
	mig->mig_destructor_called = B_FALSE;
	return (mig);
}
/*
 * Callback routine to tell all NFSv4 mounts in the zone to start tearing down
 * state and killing off threads.
 */
/* ARGSUSED */
static void
nfs4_mi_shutdown(zoneid_t zoneid, void *data)
{
	struct mi4_globals *mig = data;
	mntinfo4_t *mi;
	nfs4_server_t *np;

	NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
	    "nfs4_mi_shutdown zone %d\n", zoneid));
	ASSERT(mig != NULL);
	for (;;) {
		mutex_enter(&mig->mig_lock);
		mi = list_head(&mig->mig_list);
		if (mi == NULL) {
			mutex_exit(&mig->mig_lock);
			break;
		}

		NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
		    "nfs4_mi_shutdown stopping vfs %p\n",
		    (void *)mi->mi_vfsp));
		/*
		 * purge the DNLC for this filesystem
		 */
		(void) dnlc_purge_vfsp(mi->mi_vfsp, 0);
		/*
		 * Tell existing async worker threads to exit.
		 */
		mutex_enter(&mi->mi_async_lock);
		mi->mi_max_threads = 0;
		NFS4_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
		/*
		 * Set the appropriate flags, signal and wait for both the
		 * async manager and the inactive thread to exit when they're
		 * done with their current work.
		 */
		mutex_enter(&mi->mi_lock);
		mi->mi_flags |= (MI4_ASYNC_MGR_STOP|MI4_DEAD);
		mutex_exit(&mi->mi_lock);
		mutex_exit(&mi->mi_async_lock);
		if (mi->mi_manager_thread) {
			nfs4_async_manager_stop(mi->mi_vfsp);
		}
		if (mi->mi_inactive_thread) {
			mutex_enter(&mi->mi_async_lock);
			cv_signal(&mi->mi_inact_req_cv);
			/*
			 * Wait for the inactive thread to exit.
			 */
			while (mi->mi_inactive_thread != NULL) {
				cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
			}
			mutex_exit(&mi->mi_async_lock);
		}
		/*
		 * Wait for the recovery thread to complete, that is, it will
		 * signal when it is done using the "mi" structure and about
		 * to exit.
		 */
		mutex_enter(&mi->mi_lock);
		while (mi->mi_in_recovery > 0)
			cv_wait(&mi->mi_cv_in_recov, &mi->mi_lock);
		mutex_exit(&mi->mi_lock);
		/*
		 * We're done when every mi has been done or the list is empty.
		 * This one is done, remove it from the list.
		 */
		list_remove(&mig->mig_list, mi);
		mutex_exit(&mig->mig_lock);
		zone_rele_ref(&mi->mi_zone_ref, ZONE_REF_NFSV4);

		/*
		 * Release hold on vfs and mi done to prevent race with zone
		 * shutdown. This releases the hold in nfs4_mi_zonelist_add.
		 */
		VFS_RELE(mi->mi_vfsp);
		mi_rele(mi);
	}
	/*
	 * Tell each renew thread in the zone to exit.
	 */
	mutex_enter(&nfs4_server_lst_lock);
	for (np = nfs4_server_lst.forw; np != &nfs4_server_lst; np = np->forw) {
		mutex_enter(&np->s_lock);
		if (np->zoneid == zoneid) {
			/*
			 * We add another hold onto the nfs4_server_t
			 * because this will make sure that the nfs4_server_t
			 * stays around until nfs4_callback_fini_zone destroys
			 * the zone. This way, the renew thread can
			 * unconditionally release its holds on the
			 * nfs4_server_t.
			 */
			np->s_refcnt++;
			nfs4_mark_srv_dead(np);
		}
		mutex_exit(&np->s_lock);
	}
	mutex_exit(&nfs4_server_lst_lock);
}
static void
nfs4_mi_free_globals(struct mi4_globals *mig)
{
	list_destroy(&mig->mig_list);	/* makes sure the list is empty */
	mutex_destroy(&mig->mig_lock);
	kmem_free(mig, sizeof (*mig));
}
/* ARGSUSED */
static void
nfs4_mi_destroy(zoneid_t zoneid, void *data)
{
	struct mi4_globals *mig = data;

	NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
	    "nfs4_mi_destroy zone %d\n", zoneid));
	ASSERT(mig != NULL);
	mutex_enter(&mig->mig_lock);
	if (list_head(&mig->mig_list) != NULL) {
		/* Still waiting for VFS_FREEVFS() */
		mig->mig_destructor_called = B_TRUE;
		mutex_exit(&mig->mig_lock);
		return;
	}
	nfs4_mi_free_globals(mig);
}
/*
 * Add an NFS mount to the per-zone list of NFS mounts.
 */
void
nfs4_mi_zonelist_add(mntinfo4_t *mi)
{
	struct mi4_globals *mig;

	mig = zone_getspecific(mi4_list_key, mi->mi_zone);
	mutex_enter(&mig->mig_lock);
	list_insert_head(&mig->mig_list, mi);
	/*
	 * hold added to eliminate race with zone shutdown - this will be
	 * released in mi_shutdown
	 */
	mi_hold(mi);
	VFS_HOLD(mi->mi_vfsp);
	mutex_exit(&mig->mig_lock);
}
/*
 * Remove an NFS mount from the per-zone list of NFS mounts.
 */
void
nfs4_mi_zonelist_remove(mntinfo4_t *mi)
{
	struct mi4_globals *mig;

	mig = zone_getspecific(mi4_list_key, mi->mi_zone);
	mutex_enter(&mig->mig_lock);
	mutex_enter(&mi->mi_lock);
	/* if this mi is marked dead, then the zone already released it */
	if (!(mi->mi_flags & MI4_DEAD)) {
		list_remove(&mig->mig_list, mi);
		mutex_exit(&mi->mi_lock);

		/* release the holds put on in zonelist_add(). */
		VFS_RELE(mi->mi_vfsp);
		mi_rele(mi);
	} else {
		mutex_exit(&mi->mi_lock);
	}

	/*
	 * We can be called asynchronously by VFS_FREEVFS() after the zone
	 * shutdown/destroy callbacks have executed; if so, clean up the zone's
	 * mi4_globals.
	 */
	if (list_head(&mig->mig_list) == NULL &&
	    mig->mig_destructor_called == B_TRUE) {
		nfs4_mi_free_globals(mig);
		return;
	}
	mutex_exit(&mig->mig_lock);
}
void
nfs_free_mi4(mntinfo4_t *mi)
{
	nfs4_open_owner_t	*foop;
	nfs4_oo_hash_bucket_t	*bucketp;
	nfs4_debug_msg_t	*msgp;
	servinfo4_t		*svp;
	int i;

	/*
	 * Code introduced here should be carefully evaluated to make
	 * sure none of the freed resources are accessed either directly
	 * or indirectly after freeing them. For eg: Introducing calls to
	 * NFS4_DEBUG that use mntinfo4_t structure member after freeing
	 * the structure members or other routines calling back into NFS
	 * accessing freed mntinfo4_t structure member.
	 */
	mutex_enter(&mi->mi_lock);
	ASSERT(mi->mi_recovthread == NULL);
	ASSERT(mi->mi_flags & MI4_ASYNC_MGR_STOP);
	mutex_exit(&mi->mi_lock);
	mutex_enter(&mi->mi_async_lock);
	ASSERT(mi->mi_threads[NFS4_ASYNC_QUEUE] == 0 &&
	    mi->mi_threads[NFS4_ASYNC_PGOPS_QUEUE] == 0);
	ASSERT(mi->mi_manager_thread == NULL);
	mutex_exit(&mi->mi_async_lock);
	if (mi->mi_io_kstats) {
		kstat_delete(mi->mi_io_kstats);
		mi->mi_io_kstats = NULL;
	}
	if (mi->mi_ro_kstats) {
		kstat_delete(mi->mi_ro_kstats);
		mi->mi_ro_kstats = NULL;
	}
	if (mi->mi_recov_ksp) {
		kstat_delete(mi->mi_recov_ksp);
		mi->mi_recov_ksp = NULL;
	}
	mutex_enter(&mi->mi_msg_list_lock);
	while (msgp = list_head(&mi->mi_msg_list)) {
		list_remove(&mi->mi_msg_list, msgp);
		nfs4_free_msg(msgp);
	}
	mutex_exit(&mi->mi_msg_list_lock);
	list_destroy(&mi->mi_msg_list);
	if (mi->mi_fname != NULL)
		fn_rele(&mi->mi_fname);
	if (mi->mi_rootfh != NULL)
		sfh4_rele(&mi->mi_rootfh);
	if (mi->mi_srvparentfh != NULL)
		sfh4_rele(&mi->mi_srvparentfh);
	svp = mi->mi_servers;
	sv4_free(svp);
	mutex_destroy(&mi->mi_lock);
	mutex_destroy(&mi->mi_async_lock);
	mutex_destroy(&mi->mi_msg_list_lock);
	nfs_rw_destroy(&mi->mi_recovlock);
	nfs_rw_destroy(&mi->mi_rename_lock);
	nfs_rw_destroy(&mi->mi_fh_lock);
	cv_destroy(&mi->mi_failover_cv);
	cv_destroy(&mi->mi_async_reqs_cv);
	cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_QUEUE]);
	cv_destroy(&mi->mi_async_work_cv[NFS4_ASYNC_PGOPS_QUEUE]);
	cv_destroy(&mi->mi_async_cv);
	cv_destroy(&mi->mi_inact_req_cv);
	/*
	 * Destroy the oo hash lists and mutexes for the cred hash table.
	 */
	for (i = 0; i < NFS4_NUM_OO_BUCKETS; i++) {
		bucketp = &(mi->mi_oo_list[i]);
		/* Destroy any remaining open owners on the list */
		foop = list_head(&bucketp->b_oo_hash_list);
		while (foop != NULL) {
			list_remove(&bucketp->b_oo_hash_list, foop);
			nfs4_destroy_open_owner(foop);
			foop = list_head(&bucketp->b_oo_hash_list);
		}
		list_destroy(&bucketp->b_oo_hash_list);
		mutex_destroy(&bucketp->b_lock);
	}
	/*
	 * Empty and destroy the freed open owner list.
	 */
	foop = list_head(&mi->mi_foo_list);
	while (foop != NULL) {
		list_remove(&mi->mi_foo_list, foop);
		nfs4_destroy_open_owner(foop);
		foop = list_head(&mi->mi_foo_list);
	}
	list_destroy(&mi->mi_foo_list);
	list_destroy(&mi->mi_bseqid_list);
	list_destroy(&mi->mi_lost_state);
	avl_destroy(&mi->mi_filehandles);
	kmem_free(mi, sizeof (*mi));
}
void
mi_hold(mntinfo4_t *mi)
{
	atomic_inc_32(&mi->mi_count);
	ASSERT(mi->mi_count != 0);
}

void
mi_rele(mntinfo4_t *mi)
{
	ASSERT(mi->mi_count != 0);
	if (atomic_dec_32_nv(&mi->mi_count) == 0) {
		nfs_free_mi4(mi);
	}
}
vnode_t nfs4_xattr_notsupp_vnode;

void
nfs4_clnt_init(void)
{
	(void) nfs4_rnode_init();
	(void) nfs4_shadow_init();
	(void) nfs4_acache_init();
	(void) nfs4_subr_init();

	nfs4_callback_init();
	nfs4_secinfo_init();

	tsd_create(&nfs4_tsd_key, NULL);

	/*
	 * Add a CPR callback so that we can update client
	 * lease after a suspend and resume.
	 */
	cid = callb_add(nfs4_client_cpr_callb, 0, CB_CL_CPR_RPC, "nfs4");

	zone_key_create(&mi4_list_key, nfs4_mi_init, nfs4_mi_shutdown,
	    nfs4_mi_destroy);

	/*
	 * Initialize the reference count of the notsupp xattr cache vnode to 1
	 * so that it never goes away (fop_inactive isn't called on it).
	 */
	vmobject_init(&nfs4_xattr_notsupp_vnode.v_object,
	    &nfs4_xattr_notsupp_vnode);
	vn_reinit(&nfs4_xattr_notsupp_vnode);
	nfs4_xattr_notsupp_vnode.v_count = 1;
}

void
nfs4_clnt_fini(void)
{
	(void) zone_key_delete(mi4_list_key);

	(void) nfs4_rnode_fini();
	(void) nfs4_shadow_fini();
	(void) nfs4_acache_fini();
	(void) nfs4_subr_fini();

	nfs4_callback_fini();
	nfs4_secinfo_fini();

	tsd_destroy(&nfs4_tsd_key);

	if (cid)
		(void) callb_delete(cid);
}
/* ARGSUSED */
static boolean_t
nfs4_client_cpr_callb(void *arg, int code)
{
	/*
	 * We get called for Suspend and Resume events.
	 * For the suspend case we simply don't care!
	 */
	if (code == CB_CODE_CPR_CHKPT) {
		return (B_TRUE);
	}

	/*
	 * When we get to here we are in the process of
	 * resuming the system from a previous suspend.
	 */
	nfs4_client_resumed = gethrestime_sec();
	return (B_TRUE);
}
void
nfs4_renew_lease_thread(nfs4_server_t *sp)
{
	int error = 0;
	time_t tmp_last_renewal_time, tmp_time, tmp_now_time, kip_secs;
	clock_t tick_delay = 0;
	clock_t time_left = 0;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;

	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_renew_lease_thread: acting on sp 0x%p", (void*)sp));
	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "nfsv4Lease");

	mutex_enter(&sp->s_lock);
	/* sp->s_lease_time is set via a GETATTR */
	sp->last_renewal_time = gethrestime_sec();
	sp->lease_valid = NFS4_LEASE_UNINITIALIZED;
	ASSERT(sp->s_refcnt >= 1);

	for (;;) {
		if (!sp->state_ref_count ||
		    sp->lease_valid != NFS4_LEASE_VALID) {

			kip_secs = MAX((sp->s_lease_time >> 1) -
			    (3 * sp->propagation_delay.tv_sec), 1);

			tick_delay = SEC_TO_TICK(kip_secs);

			NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
			    "nfs4_renew_lease_thread: no renew : thread "
			    "wait %ld secs", kip_secs));

			NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
			    "nfs4_renew_lease_thread: no renew : "
			    "state_ref_count %d, lease_valid %d",
			    sp->state_ref_count, sp->lease_valid));

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);
			time_left = cv_reltimedwait(&sp->cv_thread_exit,
			    &sp->s_lock, tick_delay, TR_CLOCK_TICK);
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);

			NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
			    "nfs4_renew_lease_thread: no renew: "
			    "time left %ld", time_left));

			if (sp->s_thread_exit == NFS4_THREAD_EXIT)
				goto die;
			continue;
		}

		tmp_last_renewal_time = sp->last_renewal_time;

		tmp_time = gethrestime_sec() - sp->last_renewal_time +
		    (3 * sp->propagation_delay.tv_sec);

		NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
		    "nfs4_renew_lease_thread: tmp_time %ld, "
		    "sp->last_renewal_time %ld", tmp_time,
		    sp->last_renewal_time));

		kip_secs = MAX((sp->s_lease_time >> 1) - tmp_time, 1);

		tick_delay = SEC_TO_TICK(kip_secs);

		NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
		    "nfs4_renew_lease_thread: valid lease: sleep for %ld "
		    "secs", kip_secs));

		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		time_left = cv_reltimedwait(&sp->cv_thread_exit, &sp->s_lock,
		    tick_delay, TR_CLOCK_TICK);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);

		NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
		    "nfs4_renew_lease_thread: valid lease: time left %ld :"
		    "sp last_renewal_time %ld, nfs4_client_resumed %ld, "
		    "tmp_last_renewal_time %ld", time_left,
		    sp->last_renewal_time, nfs4_client_resumed,
		    tmp_last_renewal_time));

		if (sp->s_thread_exit == NFS4_THREAD_EXIT)
			goto die;

		if (tmp_last_renewal_time == sp->last_renewal_time ||
		    (nfs4_client_resumed != 0 &&
		    nfs4_client_resumed > sp->last_renewal_time)) {
			/*
			 * Issue RENEW op since we haven't renewed the lease
			 * since we slept.
			 */
			tmp_now_time = gethrestime_sec();
			error = nfs4renew(sp);
			/*
			 * Need to re-acquire sp's lock, nfs4renew()
			 * relinquishes it.
			 */
			mutex_enter(&sp->s_lock);

			/*
			 * See if someone changed s_thread_exit while we gave
			 * up s_lock.
			 */
			if (sp->s_thread_exit == NFS4_THREAD_EXIT)
				goto die;

			if (!error) {
				/*
				 * check to see if we implicitly renewed while
				 * we waited for a reply for our RENEW call.
				 */
				if (tmp_last_renewal_time ==
				    sp->last_renewal_time) {
					/* no implicit renew came */
					sp->last_renewal_time = tmp_now_time;
				} else {
					NFS4_DEBUG(nfs4_client_lease_debug,
					    (CE_NOTE, "renew_thread: did "
					    "implicit renewal before reply "
					    "from server for RENEW"));
				}
			} else {
				/* figure out error */
				NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
				    "renew_thread: nfs4renew returned error"
				    " %d", error));
			}
		}
	}

die:
	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_renew_lease_thread: thread exiting"));

	while (sp->s_otw_call_count != 0) {
		NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
		    "nfs4_renew_lease_thread: waiting for outstanding "
		    "otw calls to finish for sp 0x%p, current "
		    "s_otw_call_count %d", (void *)sp,
		    sp->s_otw_call_count));
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		cv_wait(&sp->s_cv_otw_count, &sp->s_lock);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
	}
	mutex_exit(&sp->s_lock);

	nfs4_server_rele(sp);		/* free the thread's reference */
	nfs4_server_rele(sp);		/* free the list's reference */

	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);	/* drops cpr_lock */
	mutex_destroy(&cpr_lock);

	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_renew_lease_thread: renew thread exit officially"));

	zthread_exit();
	/* NOTREACHED */
}
/*
 * Send out a RENEW op to the server.
 * Assumes sp is locked down.
 */
static int
nfs4renew(nfs4_server_t *sp)
{
	COMPOUND4args_clnt args;
	COMPOUND4res_clnt res;
	nfs_argop4 argop[1];
	int doqueue = 1;
	int rpc_error;
	cred_t *cr;
	mntinfo4_t *mi;
	timespec_t prop_time, after_time;
	int needrecov = FALSE;
	nfs4_recov_state_t recov_state;
	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "nfs4renew"));

	recov_state.rs_flags = 0;
	recov_state.rs_num_retry_despite_err = 0;

recov_retry:
	mi = sp->mntinfo4_list;
	VFS_HOLD(mi->mi_vfsp);
	mutex_exit(&sp->s_lock);
	ASSERT(mi != NULL);

	e.error = nfs4_start_op(mi, NULL, NULL, &recov_state);
	if (e.error) {
		VFS_RELE(mi->mi_vfsp);
		return (e.error);
	}

	/* Check to see if we're dealing with a marked-dead sp */
	mutex_enter(&sp->s_lock);
	if (sp->s_thread_exit == NFS4_THREAD_EXIT) {
		mutex_exit(&sp->s_lock);
		nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
		VFS_RELE(mi->mi_vfsp);
		return (0);
	}

	/* Make sure mi hasn't changed on us */
	if (mi != sp->mntinfo4_list) {
		/* Must drop sp's lock to avoid a recursive mutex enter */
		mutex_exit(&sp->s_lock);
		nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
		VFS_RELE(mi->mi_vfsp);
		mutex_enter(&sp->s_lock);
		goto recov_retry;
	}
	mutex_exit(&sp->s_lock);

	args.ctag = TAG_RENEW;

	args.array_len = 1;
	args.array = argop;

	argop[0].argop = OP_RENEW;

	mutex_enter(&sp->s_lock);
	argop[0].nfs_argop4_u.oprenew.clientid = sp->clientid;
	cr = sp->s_cred;
	crhold(cr);
	mutex_exit(&sp->s_lock);

	ASSERT(cr != NULL);

	/* used to figure out RTT for sp */
	gethrestime(&prop_time);

	NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
	    "nfs4renew: %s call, sp 0x%p", needrecov ? "recov" : "first",
	    (void*)sp));
	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "before: %ld s %ld ns ",
	    prop_time.tv_sec, prop_time.tv_nsec));

	DTRACE_PROBE2(nfs4__renew__start, nfs4_server_t *, sp,
	    mntinfo4_t *, mi);

	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
	crfree(cr);

	DTRACE_PROBE2(nfs4__renew__end, nfs4_server_t *, sp,
	    mntinfo4_t *, mi);

	gethrestime(&after_time);

	mutex_enter(&sp->s_lock);
	sp->propagation_delay.tv_sec =
	    MAX(1, after_time.tv_sec - prop_time.tv_sec);
	mutex_exit(&sp->s_lock);

	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "after : %ld s %ld ns ",
	    after_time.tv_sec, after_time.tv_nsec));

	if (e.error == 0 && res.status == NFS4ERR_CB_PATH_DOWN) {
		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
		nfs4_delegreturn_all(sp);
		nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
		VFS_RELE(mi->mi_vfsp);
		/*
		 * If the server returns CB_PATH_DOWN, it has renewed
		 * the lease and informed us that the callback path is
		 * down.  Since the lease is renewed, just return 0 and
		 * let the renew thread proceed as normal.
		 */
		return (0);
	}

	needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp);
	if (!needrecov && e.error) {
		nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
		VFS_RELE(mi->mi_vfsp);
		return (e.error);
	}

	rpc_error = e.error;

	if (needrecov) {
		NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
		    "nfs4renew: initiating recovery\n"));

		if (nfs4_start_recovery(&e, mi, NULL, NULL, NULL, NULL,
		    OP_RENEW, NULL, NULL, NULL) == FALSE) {
			nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);
			VFS_RELE(mi->mi_vfsp);
			if (!e.error)
				xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
			mutex_enter(&sp->s_lock);
			goto recov_retry;
		}
		/* fall through for res.status case */
	}

	if (res.status) {
		if (res.status == NFS4ERR_LEASE_MOVED) {
			/*
			 * XXX need to try every mntinfo4 in sp->mntinfo4_list
			 * to renew the lease on that server
			 */
		}
		e.error = geterrno4(res.status);
	}

	if (!rpc_error)
		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);

	nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov);

	VFS_RELE(mi->mi_vfsp);

	return (e.error);
}
void
nfs4_inc_state_ref_count(mntinfo4_t *mi)
{
	nfs4_server_t *sp;

	/* this locks down sp if it is found */
	sp = find_nfs4_server(mi);

	if (sp != NULL) {
		nfs4_inc_state_ref_count_nolock(sp, mi);
		mutex_exit(&sp->s_lock);
		nfs4_server_rele(sp);
	}
}

/*
 * Bump the number of OPEN files (ie: those with state) so we know if this
 * nfs4_server has any state to maintain a lease for or not.
 *
 * Also, marks the nfs4_server's lease valid if it hasn't been done so already.
 */
void
nfs4_inc_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
{
	ASSERT(mutex_owned(&sp->s_lock));

	sp->state_ref_count++;
	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_inc_state_ref_count: state_ref_count now %d",
	    sp->state_ref_count));

	if (sp->lease_valid == NFS4_LEASE_UNINITIALIZED)
		sp->lease_valid = NFS4_LEASE_VALID;

	/*
	 * If this call caused the lease to be marked valid and/or
	 * took the state_ref_count from 0 to 1, then start the time
	 * on lease renewal.
	 */
	if (sp->lease_valid == NFS4_LEASE_VALID && sp->state_ref_count == 1)
		sp->last_renewal_time = gethrestime_sec();

	/* update the number of open files for mi */
	mi->mi_open_files++;
}

void
nfs4_dec_state_ref_count(mntinfo4_t *mi)
{
	nfs4_server_t *sp;

	/* this locks down sp if it is found */
	sp = find_nfs4_server_all(mi, 1);

	if (sp != NULL) {
		nfs4_dec_state_ref_count_nolock(sp, mi);
		mutex_exit(&sp->s_lock);
		nfs4_server_rele(sp);
	}
}

/*
 * Decrement the number of OPEN files (ie: those with state) so we know if
 * this nfs4_server has any state to maintain a lease for or not.
 */
void
nfs4_dec_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi)
{
	ASSERT(mutex_owned(&sp->s_lock));
	ASSERT(sp->state_ref_count != 0);
	sp->state_ref_count--;

	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_dec_state_ref_count: state ref count now %d",
	    sp->state_ref_count));

	mi->mi_open_files--;
	NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
	    "nfs4_dec_state_ref_count: mi open files %d, v4 flags 0x%x",
	    mi->mi_open_files, mi->mi_flags));

	/* We don't have to hold the mi_lock to test mi_flags */
	if (mi->mi_open_files == 0 &&
	    (mi->mi_flags & MI4_REMOVE_ON_LAST_CLOSE)) {
		NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE,
		    "nfs4_dec_state_ref_count: remove mntinfo4 %p since "
		    "we have closed the last open file", (void*)mi));
		nfs4_remove_mi_from_server(mi, sp);
	}
}
bool_t
inlease(nfs4_server_t *sp)
{
	bool_t result;

	ASSERT(mutex_owned(&sp->s_lock));

	if (sp->lease_valid == NFS4_LEASE_VALID &&
	    gethrestime_sec() < sp->last_renewal_time + sp->s_lease_time)
		result = TRUE;
	else
		result = FALSE;

	return (result);
}
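/*
 * Example (numbers illustrative): with last_renewal_time = T and
 * s_lease_time = 90 seconds, inlease() returns TRUE until wall-clock time
 * T + 90s; callers can use this to decide whether server state can still be
 * trusted without first forcing a renewal.
 */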
/*
 * Return non-zero if the given nfs4_server_t is going through recovery.
 */
int
nfs4_server_in_recovery(nfs4_server_t *sp)
{
	return (nfs_rw_lock_held(&sp->s_recovlock, RW_WRITER));
}
/*
 * Compare two shared filehandle objects.  Returns -1, 0, or +1, if the
 * first is less than, equal to, or greater than the second.
 */
int
sfh4cmp(const void *p1, const void *p2)
{
	const nfs4_sharedfh_t *sfh1 = (const nfs4_sharedfh_t *)p1;
	const nfs4_sharedfh_t *sfh2 = (const nfs4_sharedfh_t *)p2;

	return (nfs4cmpfh(&sfh1->sfh_fh, &sfh2->sfh_fh));
}

/*
 * Create a table for shared filehandle objects.
 */
void
sfh4_createtab(avl_tree_t *tab)
{
	avl_create(tab, sfh4cmp, sizeof (nfs4_sharedfh_t),
	    offsetof(nfs4_sharedfh_t, sfh_tree));
}
/*
 * Return a shared filehandle object for the given filehandle.  The caller
 * is responsible for eventually calling sfh4_rele().
 */

nfs4_sharedfh_t *
sfh4_put(const nfs_fh4 *fh, mntinfo4_t *mi, nfs4_sharedfh_t *key)
{
	nfs4_sharedfh_t *sfh, *nsfh;
	avl_index_t where;
	nfs4_sharedfh_t skey;

	if (!key) {
		skey.sfh_fh = *fh;
		key = &skey;
	}

	nsfh = kmem_alloc(sizeof (nfs4_sharedfh_t), KM_SLEEP);
	nsfh->sfh_fh.nfs_fh4_len = fh->nfs_fh4_len;
	/*
	 * We allocate the largest possible filehandle size because it's
	 * not that big, and it saves us from possibly having to resize the
	 * buffer later.
	 */
	nsfh->sfh_fh.nfs_fh4_val = kmem_alloc(NFS4_FHSIZE, KM_SLEEP);
	bcopy(fh->nfs_fh4_val, nsfh->sfh_fh.nfs_fh4_val, fh->nfs_fh4_len);
	mutex_init(&nsfh->sfh_lock, NULL, MUTEX_DEFAULT, NULL);
	nsfh->sfh_refcnt = 1;
	nsfh->sfh_flags = SFH4_IN_TREE;
	nsfh->sfh_mi = mi;
	NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, "sfh4_get: new object (%p)",
	    (void *)nsfh));

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
	sfh = avl_find(&mi->mi_filehandles, key, &where);
	if (sfh != NULL) {
		mutex_enter(&sfh->sfh_lock);
		sfh->sfh_refcnt++;
		mutex_exit(&sfh->sfh_lock);
		nfs_rw_exit(&mi->mi_fh_lock);
		/* free our speculative allocs */
		kmem_free(nsfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE);
		kmem_free(nsfh, sizeof (nfs4_sharedfh_t));
		return (sfh);
	}

	avl_insert(&mi->mi_filehandles, nsfh, where);
	nfs_rw_exit(&mi->mi_fh_lock);

	return (nsfh);
}
/*
 * Return a shared filehandle object for the given filehandle.  The caller
 * is responsible for eventually calling sfh4_rele().
 */
nfs4_sharedfh_t *
sfh4_get(const nfs_fh4 *fh, mntinfo4_t *mi)
{
	nfs4_sharedfh_t *sfh;
	nfs4_sharedfh_t key;

	ASSERT(fh->nfs_fh4_len <= NFS4_FHSIZE);

#ifdef DEBUG
	if (nfs4_sharedfh_debug) {
		nfs4_fhandle_t fhandle;

		fhandle.fh_len = fh->nfs_fh4_len;
		bcopy(fh->nfs_fh4_val, fhandle.fh_buf, fhandle.fh_len);
		zcmn_err(mi->mi_zone->zone_id, CE_NOTE, "sfh4_get:");
		nfs4_printfhandle(&fhandle);
	}
#endif

	/*
	 * If there's already an object for the given filehandle, bump the
	 * reference count and return it.  Otherwise, create a new object
	 * and add it to the AVL tree.
	 */

	key.sfh_fh = *fh;

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
	sfh = avl_find(&mi->mi_filehandles, &key, NULL);
	if (sfh != NULL) {
		mutex_enter(&sfh->sfh_lock);
		sfh->sfh_refcnt++;
		NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
		    "sfh4_get: found existing %p, new refcnt=%d",
		    (void *)sfh, sfh->sfh_refcnt));
		mutex_exit(&sfh->sfh_lock);
		nfs_rw_exit(&mi->mi_fh_lock);
		return (sfh);
	}
	nfs_rw_exit(&mi->mi_fh_lock);

	return (sfh4_put(fh, mi, &key));
}
/*
 * Get a reference to the given shared filehandle object.
 */
void
sfh4_hold(nfs4_sharedfh_t *sfh)
{
	ASSERT(sfh->sfh_refcnt > 0);

	mutex_enter(&sfh->sfh_lock);
	sfh->sfh_refcnt++;
	NFS4_DEBUG(nfs4_sharedfh_debug,
	    (CE_NOTE, "sfh4_hold %p, new refcnt=%d",
	    (void *)sfh, sfh->sfh_refcnt));
	mutex_exit(&sfh->sfh_lock);
}
/*
 * Release a reference to the given shared filehandle object and null out
 * the given pointer.
 */
void
sfh4_rele(nfs4_sharedfh_t **sfhpp)
{
	mntinfo4_t *mi;
	nfs4_sharedfh_t *sfh = *sfhpp;

	ASSERT(sfh->sfh_refcnt > 0);

	mutex_enter(&sfh->sfh_lock);
	if (sfh->sfh_refcnt > 1) {
		sfh->sfh_refcnt--;
		NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
		    "sfh4_rele %p, new refcnt=%d",
		    (void *)sfh, sfh->sfh_refcnt));
		mutex_exit(&sfh->sfh_lock);
		goto finish;
	}
	mutex_exit(&sfh->sfh_lock);

	/*
	 * Possibly the last reference, so get the lock for the table in
	 * case it's time to remove the object from the table.
	 */
	mi = sfh->sfh_mi;
	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
	mutex_enter(&sfh->sfh_lock);
	sfh->sfh_refcnt--;
	if (sfh->sfh_refcnt > 0) {
		NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
		    "sfh4_rele %p, new refcnt=%d",
		    (void *)sfh, sfh->sfh_refcnt));
		mutex_exit(&sfh->sfh_lock);
		nfs_rw_exit(&mi->mi_fh_lock);
		goto finish;
	}

	NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE,
	    "sfh4_rele %p, last ref", (void *)sfh));
	if (sfh->sfh_flags & SFH4_IN_TREE) {
		avl_remove(&mi->mi_filehandles, sfh);
		sfh->sfh_flags &= ~SFH4_IN_TREE;
	}
	mutex_exit(&sfh->sfh_lock);
	nfs_rw_exit(&mi->mi_fh_lock);
	mutex_destroy(&sfh->sfh_lock);
	kmem_free(sfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE);
	kmem_free(sfh, sizeof (nfs4_sharedfh_t));

finish:
	*sfhpp = NULL;
}
/*
 * Update the filehandle for the given shared filehandle object.
 */

int nfs4_warn_dupfh = 0;	/* if set, always warn about dup fhs below */

void
sfh4_update(nfs4_sharedfh_t *sfh, const nfs_fh4 *newfh)
{
	mntinfo4_t *mi = sfh->sfh_mi;
	nfs4_sharedfh_t *dupsfh;
	avl_index_t where;
	nfs4_sharedfh_t key;

#ifdef DEBUG
	mutex_enter(&sfh->sfh_lock);
	ASSERT(sfh->sfh_refcnt > 0);
	mutex_exit(&sfh->sfh_lock);
#endif
	ASSERT(newfh->nfs_fh4_len <= NFS4_FHSIZE);

	/*
	 * The basic plan is to remove the shared filehandle object from
	 * the table, update it to have the new filehandle, then reinsert
	 * it.
	 */

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0);
	mutex_enter(&sfh->sfh_lock);
	if (sfh->sfh_flags & SFH4_IN_TREE) {
		avl_remove(&mi->mi_filehandles, sfh);
		sfh->sfh_flags &= ~SFH4_IN_TREE;
	}
	mutex_exit(&sfh->sfh_lock);
	sfh->sfh_fh.nfs_fh4_len = newfh->nfs_fh4_len;
	bcopy(newfh->nfs_fh4_val, sfh->sfh_fh.nfs_fh4_val,
	    sfh->sfh_fh.nfs_fh4_len);

	/*
	 * XXX If there is already a shared filehandle object with the new
	 * filehandle, we're in trouble, because the rnode code assumes
	 * that there is only one shared filehandle object for a given
	 * filehandle.  So issue a warning (for read-write mounts only)
	 * and don't try to re-insert the given object into the table.
	 * Hopefully the given object will quickly go away and everyone
	 * will use the new object.
	 */
	key.sfh_fh = *newfh;
	dupsfh = avl_find(&mi->mi_filehandles, &key, &where);
	if (dupsfh != NULL) {
		if (!(mi->mi_vfsp->vfs_flag & VFS_RDONLY) || nfs4_warn_dupfh) {
			zcmn_err(mi->mi_zone->zone_id, CE_WARN, "sfh4_update: "
			    "duplicate filehandle detected");
			sfh4_printfhandle(dupsfh);
		}
	} else {
		avl_insert(&mi->mi_filehandles, sfh, where);
		mutex_enter(&sfh->sfh_lock);
		sfh->sfh_flags |= SFH4_IN_TREE;
		mutex_exit(&sfh->sfh_lock);
	}
	nfs_rw_exit(&mi->mi_fh_lock);
}
/*
 * Copy out the current filehandle for the given shared filehandle object.
 */
void
sfh4_copyval(const nfs4_sharedfh_t *sfh, nfs4_fhandle_t *fhp)
{
	mntinfo4_t *mi = sfh->sfh_mi;

	ASSERT(sfh->sfh_refcnt > 0);

	(void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0);
	fhp->fh_len = sfh->sfh_fh.nfs_fh4_len;
	ASSERT(fhp->fh_len <= NFS4_FHSIZE);
	bcopy(sfh->sfh_fh.nfs_fh4_val, fhp->fh_buf, fhp->fh_len);
	nfs_rw_exit(&mi->mi_fh_lock);
}
/*
 * Print out the filehandle for the given shared filehandle object.
 */
void
sfh4_printfhandle(const nfs4_sharedfh_t *sfh)
{
	nfs4_fhandle_t fhandle;

	sfh4_copyval(sfh, &fhandle);
	nfs4_printfhandle(&fhandle);
}
/*
 * Compare 2 fnames.  Returns -1 if the first is "less" than the second, 0
 * if they're the same, +1 if the first is "greater" than the second.  The
 * caller (or whoever's calling the AVL package) is responsible for
 * handling locking issues.
 */

static int
fncmp(const void *p1, const void *p2)
{
	const nfs4_fname_t *f1 = p1;
	const nfs4_fname_t *f2 = p2;
	int res;

	res = strcmp(f1->fn_name, f2->fn_name);
	/*
	 * The AVL package wants +/-1, not arbitrary positive or negative
	 * integers.
	 */
	if (res > 0)
		res = 1;
	else if (res < 0)
		res = -1;
	return (res);
}
/*
 * Get or create an fname with the given name, as a child of the given
 * fname.  The caller is responsible for eventually releasing the reference
 * (fn_rele()).  parent may be NULL.
 */

nfs4_fname_t *
fn_get(nfs4_fname_t *parent, char *name, nfs4_sharedfh_t *sfh)
{
	nfs4_fname_t key;
	nfs4_fname_t *fnp;
	avl_index_t where;

	key.fn_name = name;

	/*
	 * If there's already an fname registered with the given name, bump
	 * its reference count and return it.  Otherwise, create a new one
	 * and add it to the parent's AVL tree.
	 *
	 * fname entries we are looking for should match both name
	 * and sfh stored in the fname.
	 */
again:
	if (parent != NULL) {
		mutex_enter(&parent->fn_lock);
		fnp = avl_find(&parent->fn_children, &key, &where);
		if (fnp != NULL) {
			/*
			 * This hold on fnp is released below later,
			 * in case this is not the fnp we want.
			 */
			fn_hold(fnp);

			if (fnp->fn_sfh == sfh) {
				/*
				 * We have found our entry.
				 * Put a hold on it and return it.
				 */
				mutex_exit(&parent->fn_lock);
				return (fnp);
			}

			/*
			 * We have found an entry that has a mismatching
			 * fn_sfh. This could be a stale entry due to
			 * server side rename. We will remove this entry
			 * and make sure no such entries exist.
			 */
			mutex_exit(&parent->fn_lock);
			mutex_enter(&fnp->fn_lock);
			if (fnp->fn_parent == parent) {
				/*
				 * Remove ourselves from parent's
				 * fn_children tree.
				 */
				mutex_enter(&parent->fn_lock);
				avl_remove(&parent->fn_children, fnp);
				mutex_exit(&parent->fn_lock);
				fn_rele(&fnp->fn_parent);
			}
			mutex_exit(&fnp->fn_lock);
			fn_rele(&fnp);
			goto again;
		}
	}

	fnp = kmem_alloc(sizeof (nfs4_fname_t), KM_SLEEP);
	mutex_init(&fnp->fn_lock, NULL, MUTEX_DEFAULT, NULL);
	fnp->fn_parent = parent;
	if (parent != NULL)
		fn_hold(parent);
	fnp->fn_len = strlen(name);
	ASSERT(fnp->fn_len < MAXNAMELEN);
	fnp->fn_name = kmem_alloc(fnp->fn_len + 1, KM_SLEEP);
	(void) strcpy(fnp->fn_name, name);
	fnp->fn_refcnt = 1;

	/*
	 * This hold on sfh is later released
	 * when we do the final fn_rele() on this fname.
	 */
	sfh4_hold(sfh);
	fnp->fn_sfh = sfh;

	avl_create(&fnp->fn_children, fncmp, sizeof (nfs4_fname_t),
	    offsetof(nfs4_fname_t, fn_tree));
	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_get %p:%s, a new nfs4_fname_t!",
	    (void *)fnp, fnp->fn_name));
	if (parent != NULL) {
		avl_insert(&parent->fn_children, fnp, where);
		mutex_exit(&parent->fn_lock);
	}

	return (fnp);
}
void
fn_hold(nfs4_fname_t *fnp)
{
	atomic_inc_32(&fnp->fn_refcnt);
	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_hold %p:%s, new refcnt=%d",
	    (void *)fnp, fnp->fn_name, fnp->fn_refcnt));
}
/*
 * Decrement the reference count of the given fname, and destroy it if its
 * reference count goes to zero.  Nulls out the given pointer.
 */

void
fn_rele(nfs4_fname_t **fnpp)
{
	nfs4_fname_t *parent;
	uint32_t newref;
	nfs4_fname_t *fnp;

recur:
	fnp = *fnpp;
	*fnpp = NULL;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		mutex_enter(&parent->fn_lock);	/* prevent new references */
	newref = atomic_dec_32_nv(&fnp->fn_refcnt);
	if (newref > 0) {
		NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
		    "fn_rele %p:%s, new refcnt=%d",
		    (void *)fnp, fnp->fn_name, fnp->fn_refcnt));
		if (parent != NULL)
			mutex_exit(&parent->fn_lock);
		mutex_exit(&fnp->fn_lock);
		return;
	}

	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_rele %p:%s, last reference, deleting...",
	    (void *)fnp, fnp->fn_name));
	if (parent != NULL) {
		avl_remove(&parent->fn_children, fnp);
		mutex_exit(&parent->fn_lock);
	}
	kmem_free(fnp->fn_name, fnp->fn_len + 1);
	sfh4_rele(&fnp->fn_sfh);
	mutex_destroy(&fnp->fn_lock);
	avl_destroy(&fnp->fn_children);
	kmem_free(fnp, sizeof (nfs4_fname_t));
	/*
	 * Recursively fn_rele the parent.
	 * Use goto instead of a recursive call to avoid stack overflow.
	 */
	if (parent != NULL) {
		fnpp = &parent;
		goto recur;
	}
}
/*
 * Returns the single component name of the given fname, in a MAXNAMELEN
 * string buffer, which the caller is responsible for freeing.  Note that
 * the name may become invalid as a result of fn_move().
 */

char *
fn_name(nfs4_fname_t *fnp)
{
	char *name;

	ASSERT(fnp->fn_len < MAXNAMELEN);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	mutex_enter(&fnp->fn_lock);
	(void) strcpy(name, fnp->fn_name);
	mutex_exit(&fnp->fn_lock);

	return (name);
}
/*
 * This function, used only by fn_path, constructs
 * a new string which looks like "prepend" + "/" + "current",
 * by allocating a new string and freeing the old one.
 */
static void
fn_path_realloc(char **curses, char *prepend)
{
	int len, curlen = 0;
	char *news;

	if (*curses == NULL) {
		/*
		 * Prime the pump, allocate just the
		 * space for prepend and return that.
		 */
		len = strlen(prepend) + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
	} else {
		/*
		 * Allocate the space for a new string.
		 * +1 +1 is for the "/" and the NULL
		 * byte at the end of it all.
		 */
		curlen = strlen(*curses);
		len = curlen + strlen(prepend) + 1 + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
		(void) strcat(news, "/");
		(void) strcat(news, *curses);
		kmem_free(*curses, curlen + 1);
	}
	*curses = news;
}
/*
 * Returns the path name (starting from the fs root) for the given fname.
 * The caller is responsible for freeing.  Note that the path may be or
 * become invalid as a result of fn_move().
 */

char *
fn_path(nfs4_fname_t *fnp)
{
	char *path;
	nfs4_fname_t *nextfnp;

	if (fnp == NULL)
		return (NULL);

	path = NULL;

	/* walk up the tree constructing the pathname. */

	fn_hold(fnp);			/* adjust for later rele */
	do {
		mutex_enter(&fnp->fn_lock);
		/*
		 * Add fn_name in front of the current path.
		 */
		fn_path_realloc(&path, fnp->fn_name);
		nextfnp = fnp->fn_parent;
		if (nextfnp != NULL)
			fn_hold(nextfnp);
		mutex_exit(&fnp->fn_lock);
		fn_rele(&fnp);
		fnp = nextfnp;
	} while (fnp != NULL);

	return (path);
}
/*
 * Return a reference to the parent of the given fname, which the caller is
 * responsible for eventually releasing.
 */

nfs4_fname_t *
fn_parent(nfs4_fname_t *fnp)
{
	nfs4_fname_t *parent;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		fn_hold(parent);
	mutex_exit(&fnp->fn_lock);

	return (parent);
}
/*
 * Update fnp so that its parent is newparent and its name is newname.
 */

void
fn_move(nfs4_fname_t *fnp, nfs4_fname_t *newparent, char *newname)
{
	nfs4_fname_t *parent, *tmpfnp;
	ssize_t newlen;
	nfs4_fname_t key;
	avl_index_t where;

	/*
	 * This assert exists to catch the client trying to rename
	 * a dir to be a child of itself.  This happened at a recent
	 * bakeoff against a 3rd party (broken) server which allowed
	 * the rename to succeed.  If it trips it means that:
	 *	a) the code in nfs4rename that detects this case is broken
	 *	b) the server is broken (since it allowed the bogus rename)
	 *
	 * For non-DEBUG kernels, prepare for a recursive mutex_enter
	 * panic below from:  mutex_enter(&newparent->fn_lock);
	 */
	ASSERT(fnp != newparent);

	/*
	 * Remove fnp from its current parent, change its name, then add it
	 * to newparent. It might happen that fnp was replaced by another
	 * nfs4_fname_t with the same fn_name in parent->fn_children.
	 * In such case, fnp->fn_parent is NULL and we skip the removal
	 * of fnp from its current parent.
	 */
	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL) {
		mutex_enter(&parent->fn_lock);
		avl_remove(&parent->fn_children, fnp);
		mutex_exit(&parent->fn_lock);
		fn_rele(&fnp->fn_parent);
	}

	newlen = strlen(newname);
	if (newlen != fnp->fn_len) {
		ASSERT(newlen < MAXNAMELEN);
		kmem_free(fnp->fn_name, fnp->fn_len + 1);
		fnp->fn_name = kmem_alloc(newlen + 1, KM_SLEEP);
		fnp->fn_len = newlen;
	}
	(void) strcpy(fnp->fn_name, newname);

again:
	mutex_enter(&newparent->fn_lock);
	key.fn_name = fnp->fn_name;
	tmpfnp = avl_find(&newparent->fn_children, &key, &where);
	if (tmpfnp != NULL) {
		/*
		 * This could be due to a file that was unlinked while
		 * open, or perhaps the rnode is in the free list.  Remove
		 * it from newparent and let it go away on its own.  The
		 * contorted code is to deal with lock order issues and
		 * race conditions.
		 */
		fn_hold(tmpfnp);
		mutex_exit(&newparent->fn_lock);
		mutex_enter(&tmpfnp->fn_lock);
		if (tmpfnp->fn_parent == newparent) {
			mutex_enter(&newparent->fn_lock);
			avl_remove(&newparent->fn_children, tmpfnp);
			mutex_exit(&newparent->fn_lock);
			fn_rele(&tmpfnp->fn_parent);
		}
		mutex_exit(&tmpfnp->fn_lock);
		fn_rele(&tmpfnp);
		goto again;
	}

	fnp->fn_parent = newparent;
	fn_hold(newparent);
	avl_insert(&newparent->fn_children, fnp, where);
	mutex_exit(&newparent->fn_lock);
	mutex_exit(&fnp->fn_lock);
}
/*
 * Return non-zero if the type information makes sense for the given vnode.
 */
int
nfs4_consistent_type(vnode_t *vp)
{
	rnode4_t *rp = VTOR4(vp);

	if (nfs4_vtype_debug && vp->v_type != VNON &&
	    rp->r_attr.va_type != VNON && vp->v_type != rp->r_attr.va_type) {
		cmn_err(CE_PANIC, "vnode %p type mismatch; v_type=%d, "
		    "rnode attr type=%d", (void *)vp, vp->v_type,
		    rp->r_attr.va_type