4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #include <sys/fs_subr.h>
29 #include <sys/errno.h>
33 #include <sys/cmn_err.h>
35 #include <sys/systm.h>
36 #include <sys/sysmacros.h>
37 #include <sys/atomic.h>
40 #include <sharefs/sharefs.h>
/*
 * Forward declaration of the sharefs vnode-operations table; the
 * actual initializer appears at the bottom of this file.
 */
42 static const struct vnodeops sharefs_ops_data
;
/*
 * sharefs_snap_create: build a NUL-terminated character buffer
 * (sft->sharefs_snap) containing every sharetab entry, giving readers
 * a stable snapshot of the share table.  Takes sharefs_lock as WRITER
 * (the node's snapshot fields are replaced) and sharetab_lock as
 * READER (the sharetab itself is only read); both locks are dropped on
 * every visible exit path.
 *
 * NOTE(review): this extract is missing interior lines (the embedded
 * original line numbers jump, e.g. 49->57, 66->70, 100->106, 115->122),
 * so local declarations, return statements and parts of the dump loop
 * are not visible here -- consult the full source before editing.
 */
45 * sharefs_snap_create: create a large character buffer with
46 * the shares enumerated.
49 sharefs_snap_create(shnode_t
*sft
)
57 rw_enter(&sharefs_lock
, RW_WRITER
);
58 rw_enter(&sharetab_lock
, RW_READER
);
/* An old snapshot exists: reuse it if still current, else free it. */
60 if (sft
->sharefs_snap
) {
62 * Nothing has changed, so no need to grab a new copy!
/*
 * Matching generation numbers mean the sharetab has not changed
 * since this snapshot was taken -- early exit, dropping both locks.
 */
64 if (sft
->sharefs_generation
== sharetab_generation
) {
65 rw_exit(&sharetab_lock
);
66 rw_exit(&sharefs_lock
);
/* Stale snapshot: free it (size + 1 covers the trailing NUL byte). */
70 ASSERT(sft
->sharefs_size
!= 0);
71 kmem_free(sft
->sharefs_snap
, sft
->sharefs_size
+ 1);
72 sft
->sharefs_snap
= NULL
;
/* Record the current sharetab totals in the node. */
75 sft
->sharefs_size
= sharetab_size
;
76 sft
->sharefs_count
= sharetab_count
;
/* Nothing is shared: leave with no snapshot allocated. */
78 if (sft
->sharefs_size
== 0) {
79 rw_exit(&sharetab_lock
);
80 rw_exit(&sharefs_lock
);
/* One extra byte for the terminating NUL. */
84 sft
->sharefs_snap
= kmem_zalloc(sft
->sharefs_size
+ 1, KM_SLEEP
);
86 buf
= sft
->sharefs_snap
;
89 * Walk the Sharetab, dumping each entry.
91 for (sht
= sharefs_sharetab
; sht
!= NULL
; sht
= sht
->s_next
) {
94 for (i
= 0; i
< SHARETAB_HASHES
; i
++) {
95 for (sh
= sht
->s_buckets
[i
].ssh_sh
;
/* Overflow guard: bail out before writing past the buffer. */
100 if ((sWritten
+ sh
->sh_size
) >
106 * Note that sh->sh_size accounts
107 * for the field separators.
108 * We need to add one for the EOL
109 * marker. And we should note that
110 * the space is accounted for in
111 * each share by the EOS marker.
/* Format one share entry as five tab-separated fields. */
113 n
= snprintf(&buf
[sWritten
],
115 "%s\t%s\t%s\t%s\t%s\n",
/* A short/long write means the entry's size did not match sh_size. */
122 if (n
!= sh
->sh_size
) {
133 * We want to record the generation number and
134 * mtime inside this snapshot.
136 gethrestime(&sharetab_snap_time
);
137 sft
->sharefs_snap_time
= sharetab_snap_time
;
138 sft
->sharefs_generation
= sharetab_generation
;
140 ASSERT(iCount
== sft
->sharefs_count
);
142 rw_exit(&sharetab_lock
);
143 rw_exit(&sharefs_lock
);
/* Error path: discard the partial snapshot and reset the bookkeeping. */
148 kmem_free(sft
->sharefs_snap
, sft
->sharefs_size
+ 1);
149 sft
->sharefs_size
= 0;
150 sft
->sharefs_count
= 0;
151 sft
->sharefs_snap
= NULL
;
152 rw_exit(&sharetab_lock
);
153 rw_exit(&sharefs_lock
);
/*
 * sharefs_getattr: VOP_GETATTR for the sharefs file.  Reports a
 * read-only regular file whose size/mtime come either from the live
 * sharetab (for the real root vnode) or from the per-open snapshot.
 * Takes sharefs_lock as READER, and additionally sharetab_lock as
 * READER on the real-vnode path.
 *
 * NOTE(review): extract is missing interior lines (original numbering
 * jumps, e.g. 168->171, 189->192); declarations of 'now' and the
 * return are not visible here.
 */
160 sharefs_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
161 caller_context_t
*ct
)
164 shnode_t
*sft
= VTOSH(vp
);
/* World-readable, never writable or executable. */
167 vap
->va_mode
= S_IRUSR
| S_IRGRP
| S_IROTH
;
168 vap
->va_nodeid
= SHAREFS_INO_FILE
;
171 rw_enter(&sharefs_lock
, RW_READER
);
174 * If we get asked about a snapped vnode, then
175 * we must report the data in that vnode.
177 * Else we report what is currently in the
/* Real root vnode: attributes track the live sharetab. */
180 if (sft
->sharefs_real_vp
) {
181 rw_enter(&sharetab_lock
, RW_READER
);
182 vap
->va_size
= sharetab_size
;
183 vap
->va_mtime
= sharetab_mtime
;
184 rw_exit(&sharetab_lock
);
/* Snapshot vnode: attributes are frozen at snapshot time. */
186 vap
->va_size
= sft
->sharefs_size
;
187 vap
->va_mtime
= sft
->sharefs_snap_time
;
189 rw_exit(&sharefs_lock
);
192 vap
->va_atime
= vap
->va_ctime
= now
;
197 vap
->va_blksize
= DEV_BSIZE
;
/* Block count derived from the size reported above. */
198 vap
->va_nblocks
= howmany(vap
->va_size
, vap
->va_blksize
);
199 vap
->va_fsid
= vp
->v_vfsp
->vfs_dev
;
/*
 * sharefs_access: VOP_ACCESS for the sharefs file.  The file is
 * strictly read-only: the visible check rejects any request that
 * includes write or execute permission.
 *
 * NOTE(review): the body after the mode test (its return statements)
 * is not visible in this extract -- original numbering jumps past
 * line 210.
 */
207 sharefs_access(vnode_t
*vp
, int mode
, int flags
, cred_t
*cr
,
208 caller_context_t
*ct
)
/* Deny write/execute; sharefs content is read-only. */
210 if (mode
& (VWRITE
|VEXEC
))
/*
 * sharefs_open: VOP_OPEN.  Creates a fresh gfs-backed vnode per open
 * and captures a private snapshot of the sharetab into it via
 * sharefs_snap_create(), so the reader never races with sharetab
 * updates.
 *
 * NOTE(review): extract is missing interior lines (declarations of
 * vp/ovp/sft/error, the snapshot error handling and the final return
 * are not visible; original numbering jumps, e.g. 235->240, 244->252).
 */
218 sharefs_open(vnode_t
**vpp
, int flag
, cred_t
*cr
, caller_context_t
*ct
)
229 * Create a new sharefs vnode for each operation. In order to
230 * avoid locks, we create a snapshot which can not change during
/* Fresh per-open vnode backed by a shnode_t. */
233 vp
= gfs_file_create(sizeof (shnode_t
), NULL
, &sharefs_ops_data
);
235 ((gfs_file_t
*)vp
->v_data
)->gfs_ino
= SHAREFS_INO_FILE
;
/* Hold the vfs of the original (opened) vnode for the new one. */
240 VFS_HOLD(ovp
->v_vfsp
);
242 VN_SET_VFS_TYPE_DEV(vp
, ovp
->v_vfsp
, VREG
, 0);
/* Keep this vnode out of the caches and paging paths. */
244 vp
->v_flag
|= VROOT
| VNOCACHE
| VNOMAP
| VNOSWAP
| VNOMOUNT
;
252 * No need for the lock, no other thread can be accessing
253 * this data structure.
255 atomic_inc_32(&sft
->sharefs_refs
);
/* Mark as a snapshot vnode, not the real root file. */
256 sft
->sharefs_real_vp
= 0;
259 * Since the sharetab could easily change on us whilst we
260 * are dumping an extremely huge sharetab, we make a copy
261 * of it here and use it to dump instead.
263 error
= sharefs_snap_create(sft
);
/*
 * sharefs_close: VOP_CLOSE.  On the last reference (v_count == 1) the
 * per-open snapshot buffer is freed and the node's snapshot state is
 * reset; the node's ref count is then dropped.  sharefs_lock is held
 * as WRITER across the teardown.
 *
 * NOTE(review): extract is missing interior lines (closing braces and
 * the return are not visible; original numbering jumps 284->287).
 */
270 sharefs_close(vnode_t
*vp
, int flag
, int count
,
271 offset_t off
, cred_t
*cr
, caller_context_t
*ct
)
273 shnode_t
*sft
= VTOSH(vp
);
278 rw_enter(&sharefs_lock
, RW_WRITER
);
/* Last close: release the snapshot, if one was taken. */
279 if (vp
->v_count
== 1) {
280 if (sft
->sharefs_snap
!= NULL
) {
/* size + 1 matches the kmem_zalloc in sharefs_snap_create. */
281 kmem_free(sft
->sharefs_snap
, sft
->sharefs_size
+ 1);
282 sft
->sharefs_size
= 0;
283 sft
->sharefs_snap
= NULL
;
284 sft
->sharefs_generation
= 0;
287 atomic_dec_32(&sft
->sharefs_refs
);
288 rw_exit(&sharefs_lock
);
/*
 * sharefs_read: VOP_READ.  Copies data out of the node's snapshot
 * buffer.  A read starting at offset 0 first refreshes the snapshot
 * (dropping and reacquiring sharefs_lock around the refresh).  The
 * requested range is then clamped/validated against sharefs_size
 * before uiomove() copies it to the caller.
 *
 * NOTE(review): extract is missing interior lines (the error check
 * after sharefs_snap_create, the uiomove length argument, and the
 * return statements are not visible; original numbering jumps, e.g.
 * 310->314, 332->336).
 */
295 sharefs_read(vnode_t
*vp
, uio_t
*uio
, int ioflag
, cred_t
*cr
,
296 caller_context_t
*ct
)
298 shnode_t
*sft
= VTOSH(vp
);
299 off_t off
= uio
->uio_offset
;
300 size_t len
= uio
->uio_resid
;
303 rw_enter(&sharefs_lock
, RW_READER
);
306 * First check to see if we need to grab a new snapshot.
/* Offset 0 = a fresh read: refresh the snapshot first.  The lock is
 * dropped because sharefs_snap_create takes it as WRITER. */
308 if (off
== (off_t
)0) {
309 rw_exit(&sharefs_lock
);
310 error
= sharefs_snap_create(sft
);
314 rw_enter(&sharefs_lock
, RW_READER
);
/* Nothing to read, or reading at/past EOF. */
318 if (len
<= 0 || off
>= sft
->sharefs_size
) {
319 rw_exit(&sharefs_lock
);
/* Clamp the request so it does not run past the snapshot. */
323 if ((size_t)(off
+ len
) > sft
->sharefs_size
)
324 len
= sft
->sharefs_size
- off
;
/* Reject negative offsets and still-oversized lengths. */
326 if (off
< 0 || len
> sft
->sharefs_size
) {
327 rw_exit(&sharefs_lock
);
/* Copy the requested range of the snapshot to the caller. */
332 error
= uiomove(sft
->sharefs_snap
+ off
,
336 rw_exit(&sharefs_lock
);
/*
 * sharefs_inactive: VOP_INACTIVE.  Tears down a sharefs vnode via
 * gfs_file_inactive(), frees any remaining snapshot buffer, then
 * frees the shnode itself (fp->gfs_size bytes).  sharefs_lock is held
 * as WRITER across the frees.
 *
 * NOTE(review): extract is missing interior lines (a NULL check on the
 * gfs_file_inactive result and closing braces are not visible;
 * original numbering jumps 344->347, 351->354).
 */
342 sharefs_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*tx
)
344 gfs_file_t
*fp
= vp
->v_data
;
/* Detach the gfs file; returns the private shnode_t on last ref. */
347 sft
= (shnode_t
*)gfs_file_inactive(vp
);
349 rw_enter(&sharefs_lock
, RW_WRITER
);
350 if (sft
->sharefs_snap
!= NULL
) {
/* size + 1 matches the kmem_zalloc in sharefs_snap_create. */
351 kmem_free(sft
->sharefs_snap
, sft
->sharefs_size
+ 1);
354 kmem_free(sft
, fp
->gfs_size
);
355 rw_exit(&sharefs_lock
);
/*
 * sharefs_create_root_file: create the root file vnode for a sharefs
 * mount via gfs_root_create_file(), and flag its shnode as the "real"
 * vnode (so sharefs_getattr reports live sharetab attributes rather
 * than snapshot attributes).
 *
 * NOTE(review): extract is missing interior lines (declarations of
 * vp/sft and the return are not visible; original numbering jumps
 * 366->370).
 */
360 sharefs_create_root_file(vfs_t
*vfsp
)
365 vp
= gfs_root_create_file(sizeof (shnode_t
),
366 vfsp
, &sharefs_ops_data
, SHAREFS_INO_FILE
);
370 sft
->sharefs_real_vp
= 1;
/*
 * Vnode operations table for sharefs: read-only file semantics backed
 * by the functions above; ioctl is rejected via fs_inval.
 *
 * NOTE(review): the initializer continues past the end of this
 * extract (the closing brace and any further entries are not visible
 * here).
 */
375 static const struct vnodeops sharefs_ops_data
= {
376 .vnop_name
= "sharefs",
377 .vop_open
= sharefs_open
,
378 .vop_close
= sharefs_close
,
379 .vop_ioctl
= fs_inval
,
380 .vop_getattr
= sharefs_getattr
,
381 .vop_access
= sharefs_access
,
382 .vop_inactive
= sharefs_inactive
,
383 .vop_read
= sharefs_read
,