/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vnode.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>

#include "zfs_comutil.h"
/* Used by fstat(1). */
SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof (znode_t), "sizeof(znode_t)");
/*
 * Define ZNODE_STATS to turn on statistic gathering.  By default, it is only
 * turned on when DEBUG is also defined.
 */
#ifdef ZNODE_STATS
#define ZNODE_STAT_ADD(stat)	((stat)++)
#else
#define ZNODE_STAT_ADD(stat)	/* nothing */
#endif /* ZNODE_STATS */
#if !defined(KMEM_DEBUG)
#define _ZFS_USE_SMR
static uma_zone_t znode_uma_zone;
#else
static kmem_cache_t *znode_cache = NULL;
#endif
extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;
/*
 * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
 * z_rangelock.  It will modify the offset and length of the lock to reflect
 * znode-specific information, and convert RL_APPEND to RL_WRITER.  This is
 * called with the rangelock_t's rl_lock held, which avoids races.
 */
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
    znode_t *zp = arg;

    /*
     * If in append mode, convert to writer and lock starting at the
     * current end of file.
     */
    if (new->lr_type == RL_APPEND) {
        new->lr_offset = zp->z_size;
        new->lr_type = RL_WRITER;
    }

    /*
     * If we need to grow the block size then lock the whole file range.
     */
    uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
    if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
        zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
        new->lr_offset = 0;
        new->lr_length = UINT64_MAX;
    }
}
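/*
 * Worked example (illustrative, not from the original source): for a
 * one-block file with z_size == 2K and z_blksz == 2K under a 128K
 * recordsize, an 8K RL_WRITER lock at offset 2K gives
 * end_size == 10K > z_blksz, and 2K < 128K, so the lock is widened to
 * [0, UINT64_MAX): growing the block size rewrites the file's only
 * block, so no other I/O may proceed anywhere in the file meanwhile.
 */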
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
    znode_t *zp = buf;

    POINTER_INVALIDATE(&zp->z_zfsvfs);

    list_link_init(&zp->z_link_node);

    mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
    rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

    zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);

    zp->z_acl_cached = NULL;
    zp->z_xattr_cached = NULL;
    zp->z_xattr_parent = 0;

    zp->z_sync_writes_cnt = 0;
    zp->z_async_writes_cnt = 0;

    return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
    znode_t *zp = buf;

    ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
    ASSERT3P(zp->z_vnode, ==, NULL);
    ASSERT(!list_link_active(&zp->z_link_node));
    mutex_destroy(&zp->z_lock);
    mutex_destroy(&zp->z_acl_lock);
    rw_destroy(&zp->z_xattr_lock);
    zfs_rangelock_fini(&zp->z_rangelock);

    ASSERT3P(zp->z_acl_cached, ==, NULL);
    ASSERT3P(zp->z_xattr_cached, ==, NULL);

    ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
    ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
#ifdef _ZFS_USE_SMR
static int
zfs_znode_cache_constructor_smr(void *mem, int size __unused, void *private,
    int flags)
{
    return (zfs_znode_cache_constructor(mem, private, flags));
}

static void
zfs_znode_cache_destructor_smr(void *mem, int size __unused, void *private)
{
    zfs_znode_cache_destructor(mem, private);
}
void
zfs_znode_init(void)
{
    /*
     * Initialize zcache
     */
    ASSERT3P(znode_uma_zone, ==, NULL);
    znode_uma_zone = uma_zcreate("zfs_znode_cache",
        sizeof (znode_t), zfs_znode_cache_constructor_smr,
        zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
    VFS_SMR_ZONE_SET(znode_uma_zone);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
    return (uma_zalloc_smr(znode_uma_zone, flags));
}
static void
zfs_znode_free_kmem(znode_t *zp)
{
    if (zp->z_xattr_cached) {
        nvlist_free(zp->z_xattr_cached);
        zp->z_xattr_cached = NULL;
    }
    uma_zfree_smr(znode_uma_zone, zp);
}
#else
void
zfs_znode_init(void)
{
    /*
     * Initialize zcache
     */
    ASSERT3P(znode_cache, ==, NULL);
    znode_cache = kmem_cache_create("zfs_znode_cache",
        sizeof (znode_t), 0, zfs_znode_cache_constructor,
        zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_RECLAIMABLE);
}

static znode_t *
zfs_znode_alloc_kmem(int flags)
{
    return (kmem_cache_alloc(znode_cache, flags));
}

static void
zfs_znode_free_kmem(znode_t *zp)
{
    if (zp->z_xattr_cached) {
        nvlist_free(zp->z_xattr_cached);
        zp->z_xattr_cached = NULL;
    }
    kmem_cache_free(znode_cache, zp);
}
#endif
void
zfs_znode_fini(void)
{
    /*
     * Cleanup zcache
     */
#ifdef _ZFS_USE_SMR
    if (znode_uma_zone) {
        uma_zdestroy(znode_uma_zone);
        znode_uma_zone = NULL;
    }
#else
    if (znode_cache) {
        kmem_cache_destroy(znode_cache);
        znode_cache = NULL;
    }
#endif
}
static int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
    zfs_acl_ids_t acl_ids;
    vattr_t vattr;
    znode_t *sharezp;
    znode_t *zp;
    int error;

    vattr.va_mask = AT_MODE|AT_UID|AT_GID;
    vattr.va_type = VDIR;
    vattr.va_mode = S_IFDIR|0555;
    vattr.va_uid = crgetuid(kcred);
    vattr.va_gid = crgetgid(kcred);

    sharezp = zfs_znode_alloc_kmem(KM_SLEEP);
    ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
    sharezp->z_unlinked = 0;
    sharezp->z_atime_dirty = 0;
    sharezp->z_zfsvfs = zfsvfs;
    sharezp->z_is_sa = zfsvfs->z_use_sa;

    VERIFY0(zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
        kcred, NULL, &acl_ids, NULL));
    zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
    ASSERT3P(zp, ==, sharezp);
    POINTER_INVALIDATE(&sharezp->z_zfsvfs);
    error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
        ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
    zfsvfs->z_shares_dir = sharezp->z_id;

    zfs_acl_ids_free(&acl_ids);
    sa_handle_destroy(sharezp->z_sa_hdl);
    zfs_znode_free_kmem(sharezp);

    return (error);
}
/*
 * define a couple of values we need available
 * for both 64 and 32 bit environments.
 */
#define NBITSMINOR64	32		/* number of minor device bits */
#define MAXMAJ64	0xffffffffUL	/* max major value */
#define MAXMIN64	0xffffffffUL	/* max minor value */
/*
 * Create special expldev for ZFS private use.
 * Can't use standard expldev since it doesn't do
 * what we want.  The standard expldev() takes a
 * dev32_t in LP64 and expands it to a long dev_t.
 * We need an interface that takes a dev32_t in ILP32
 * and expands it to a long dev_t.
 */
static uint64_t
zfs_expldev(dev_t dev)
{
    return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
/*
 * Special cmpldev for ZFS private use.
 * Can't use standard cmpldev since it takes
 * a long dev_t and compresses it to dev32_t in
 * LP64.  We need to do a compaction of a long dev_t
 * to a dev32_t in ILP32.
 */
dev_t
zfs_cmpldev(uint64_t dev)
{
    return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}
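/*
 * Round-trip example (illustrative, not from the original source): for
 * a device with major 13 and minor 0x42,
 *
 *	uint64_t enc = zfs_expldev(makedev(13, 0x42));
 *	    enc == ((uint64_t)13 << NBITSMINOR64) | 0x42
 *	dev_t dev = zfs_cmpldev(enc);	   dev == makedev(13, 0x42)
 *
 * so zfs_cmpldev(zfs_expldev(d)) == d whenever both the major and the
 * minor number fit in 32 bits.
 */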
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
    ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
    ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

    ASSERT3P(zp->z_sa_hdl, ==, NULL);
    ASSERT3P(zp->z_acl_cached, ==, NULL);
    if (sa_hdl == NULL) {
        VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
            SA_HDL_SHARED, &zp->z_sa_hdl));
    } else {
        zp->z_sa_hdl = sa_hdl;
        sa_set_userp(sa_hdl, zp);
    }

    zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

    /*
     * Slap on VROOT if we are the root znode unless we are the root
     * node of a snapshot mounted under .zfs.
     */
    if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
        ZTOV(zp)->v_flag |= VROOT;
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
    ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
        ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zp->z_zfsvfs));

    sa_handle_destroy(zp->z_sa_hdl);
    zp->z_sa_hdl = NULL;
}
static void
zfs_vnode_forget(vnode_t *vp)
{
    /* copied from insmntque_stddtr */
    vp->v_data = NULL;
    vp->v_op = &dead_vnodeops;
    vgone(vp);
    vput(vp);
}
/*
 * Construct a new znode/vnode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is
 * up to the caller to do, in case you don't want to
 * return the znode.
 */
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
{
    znode_t *zp;
    vnode_t *vp;
    uint64_t mode;
    uint64_t parent;
    uint64_t mtime[2], ctime[2];
    uint64_t projid = ZFS_DEFAULT_PROJID;
    sa_bulk_attr_t bulk[11];
    int count = 0;
    int error;

    zp = zfs_znode_alloc_kmem(KM_SLEEP);

#ifndef _ZFS_USE_SMR
    KASSERT((zfsvfs->z_parent->z_vfs->mnt_kern_flag & MNTK_FPLOOKUP) == 0,
        ("%s: fast path lookup enabled without smr", __func__));
#endif

    KASSERT(curthread->td_vp_reserved != NULL,
        ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
    error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
    if (error != 0) {
        zfs_znode_free_kmem(zp);
        return (NULL);
    }
    zp->z_vnode = vp;
    vp->v_data = zp;

    /*
     * Acquire the vnode lock before any possible interaction with the
     * outside world.  Specifically, there is an error path that calls
     * zfs_vnode_forget() and the vnode should be exclusively locked.
     */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

    ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
    zp->z_sa_hdl = NULL;
    zp->z_unlinked = 0;
    zp->z_atime_dirty = 0;
    zp->z_id = db->db_object;
    zp->z_blksz = blksz;
    zp->z_seq = 0x7A4653;
    zp->z_sync_writes_cnt = 0;
    zp->z_async_writes_cnt = 0;
    atomic_store_ptr(&zp->z_cached_symlink, NULL);

    zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
        &zp->z_size, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
        &zp->z_links, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
        &zp->z_pflags, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
        &zp->z_atime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
        &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
        &ctime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
        &zp->z_uid, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
        &zp->z_gid, 8);

    if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0 ||
        (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
        (zp->z_pflags & ZFS_PROJID) &&
        sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
        if (hdl == NULL)
            sa_handle_destroy(zp->z_sa_hdl);
        zfs_vnode_forget(vp);
        zp->z_vnode = NULL;
        zfs_znode_free_kmem(zp);
        return (NULL);
    }

    zp->z_projid = projid;
    zp->z_mode = mode;

    /* Cache the xattr parent id */
    if (zp->z_pflags & ZFS_XATTR)
        zp->z_xattr_parent = parent;

    vp->v_type = IFTOVT((mode_t)mode);

    switch (vp->v_type) {
    case VDIR:
        zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
        break;
    case VFIFO:
        vp->v_op = &zfs_fifoops;
        break;
    case VREG:
        if (parent == zfsvfs->z_shares_dir) {
            vp->v_op = &zfs_shareops;
        }
        break;
    default:
        break;
    }

    mutex_enter(&zfsvfs->z_znodes_lock);
    list_insert_tail(&zfsvfs->z_all_znodes, zp);
    zp->z_zfsvfs = zfsvfs;
    mutex_exit(&zfsvfs->z_znodes_lock);

#if __FreeBSD_version >= 1400077
    vn_set_state(vp, VSTATE_CONSTRUCTED);
#endif

    if (vp->v_type != VFIFO)
        VN_LOCK_ASHARE(vp);

    return (zp);
}
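/*
 * Caller pattern (sketch, mirroring zfs_zget() below): the caller must
 * reserve a vnode and hold the per-object mutex around the allocation:
 *
 *	getnewvnode_reserve();
 *	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);
 *	... sa_buf_hold() / dmu_object_info_from_db() ...
 *	zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
 *	    doi.doi_bonus_type, NULL);
 *	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
 *	getnewvnode_drop_reserve();
 */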
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	- Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
    uint64_t crtime[2], atime[2], mtime[2], ctime[2];
    uint64_t mode, size, links, parent, pflags;
    uint64_t dzp_pflags = 0;
    uint64_t rdev = 0;
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    dmu_buf_t *db;
    timestruc_t now;
    uint64_t gen, obj;
    int bonuslen;
    int dnodesize;
    sa_handle_t *sa_hdl;
    dmu_object_type_t obj_type;
    sa_bulk_attr_t *sa_attrs;
    int cnt = 0;
    zfs_acl_locator_cb_t locate = { 0 };

    ASSERT3P(vap, !=, NULL);
    ASSERT3U((vap->va_mask & AT_MODE), ==, AT_MODE);

    if (zfsvfs->z_replay) {
        obj = vap->va_nodeid;
        now = vap->va_ctime;		/* see zfs_replay_create() */
        gen = vap->va_nblocks;		/* ditto */
        dnodesize = vap->va_fsid;	/* ditto */
    } else {
        obj = 0;
        vfs_timestamp(&now);
        gen = dmu_tx_get_txg(tx);
        dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
    }

    if (dnodesize == 0)
        dnodesize = DNODE_MIN_SIZE;

    obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
    bonuslen = (obj_type == DMU_OT_SA) ?
        DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;

    /*
     * Create a new DMU object.
     */
    /*
     * There's currently no mechanism for pre-reading the blocks that will
     * be needed to allocate a new object, so we accept the small chance
     * that there will be an i/o error and we will fail one of the
     * assertions below.
     */
    if (vap->va_type == VDIR) {
        if (zfsvfs->z_replay) {
            VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
                zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
                obj_type, bonuslen, dnodesize, tx));
        } else {
            obj = zap_create_norm_dnsize(zfsvfs->z_os,
                zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
                obj_type, bonuslen, dnodesize, tx);
        }
    } else {
        if (zfsvfs->z_replay) {
            VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
                DMU_OT_PLAIN_FILE_CONTENTS, 0,
                obj_type, bonuslen, dnodesize, tx));
        } else {
            obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
                DMU_OT_PLAIN_FILE_CONTENTS, 0,
                obj_type, bonuslen, dnodesize, tx);
        }
    }

    ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
    VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));

    /*
     * If this is the root, fix up the half-initialized parent pointer
     * to reference the just-allocated physical data area.
     */
    if (flag & IS_ROOT_NODE) {
        dzp->z_id = obj;
    }
    dzp_pflags = dzp->z_pflags;

    /*
     * If parent is an xattr, so am I.
     */
    if (dzp_pflags & ZFS_XATTR) {
        flag |= IS_XATTR;
    }

    if (zfsvfs->z_use_fuids)
        pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
    else
        pflags = 0;

    if (vap->va_type == VDIR) {
        size = 2;		/* contents ("." and "..") */
        links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
    } else {
        size = links = 0;
    }

    if (vap->va_type == VBLK || vap->va_type == VCHR) {
        rdev = zfs_expldev(vap->va_rdev);
    }

    parent = dzp->z_id;
    mode = acl_ids->z_mode;
    if (flag & IS_XATTR)
        pflags |= ZFS_XATTR;

    /*
     * No execs denied will be determined when zfs_mode_compute() is called.
     */
    pflags |= acl_ids->z_aclp->z_hints &
        (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
        ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

    ZFS_TIME_ENCODE(&now, crtime);
    ZFS_TIME_ENCODE(&now, ctime);

    if (vap->va_mask & AT_ATIME) {
        ZFS_TIME_ENCODE(&vap->va_atime, atime);
    } else {
        ZFS_TIME_ENCODE(&now, atime);
    }

    if (vap->va_mask & AT_MTIME) {
        ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
    } else {
        ZFS_TIME_ENCODE(&now, mtime);
    }

    /* Now add in all of the "SA" attributes */
    VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
        &sa_hdl));

    /*
     * Setup the array of attributes to be replaced/set on the new file
     *
     * order for DMU_OT_ZNODE is critical since it needs to be constructed
     * in the old znode_phys_t format.  Don't change this ordering
     */
    sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

    if (obj_type == DMU_OT_ZNODE) {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
            NULL, &atime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
            NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
            NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
            NULL, &crtime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
            NULL, &gen, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
            NULL, &mode, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
            NULL, &size, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
            NULL, &parent, 8);
    } else {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
            NULL, &mode, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
            NULL, &size, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
            NULL, &gen, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
            NULL, &acl_ids->z_fuid, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
            NULL, &acl_ids->z_fgid, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
            NULL, &parent, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
            NULL, &pflags, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
            NULL, &atime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
            NULL, &mtime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
            NULL, &ctime, 16);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
            NULL, &crtime, 16);
    }

    SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);

    if (obj_type == DMU_OT_ZNODE) {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
            &empty_xattr, 8);
    }
    if (obj_type == DMU_OT_ZNODE ||
        (vap->va_type == VBLK || vap->va_type == VCHR)) {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
            NULL, &rdev, 8);
    }

    if (obj_type == DMU_OT_ZNODE) {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
            NULL, &pflags, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
            &acl_ids->z_fuid, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
            &acl_ids->z_fgid, 8);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
            sizeof (uint64_t) * 4);
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
            &acl_phys, sizeof (zfs_acl_phys_t));
    } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
            &acl_ids->z_aclp->z_acl_count, 8);
        locate.cb_aclp = acl_ids->z_aclp;
        SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
            zfs_acl_data_locator, &locate,
            acl_ids->z_aclp->z_acl_bytes);
        mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
            acl_ids->z_fuid, acl_ids->z_fgid);
    }

    VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));

    if (!(flag & IS_ROOT_NODE)) {
        *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
        ASSERT3P(*zpp, !=, NULL);
    } else {
        /*
         * If we are creating the root node, the "parent" we
         * passed in is the znode for the root.
         */
        *zpp = dzp;

        (*zpp)->z_sa_hdl = sa_hdl;
    }

    (*zpp)->z_pflags = pflags;
    (*zpp)->z_mode = mode;
    (*zpp)->z_dnodesize = dnodesize;

    if (vap->va_mask & AT_XVATTR)
        zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx);

    if (obj_type == DMU_OT_ZNODE ||
        acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
        VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
    }
    if (!(flag & IS_ROOT_NODE)) {
        vnode_t *vp = ZTOV(*zpp);
        vp->v_vflag |= VV_FORCEINSMQ;
        int err = insmntque(vp, zfsvfs->z_vfs);
        vp->v_vflag &= ~VV_FORCEINSMQ;
        (void) err;
        KASSERT(err == 0, ("insmntque() failed: error %d", err));
    }
    kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
    ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}
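/*
 * Usage sketch (mirrors zfs_create_share_dir() above; illustrative):
 * the caller builds a vattr, derives ACL ids, and creates the node
 * inside an assigned transaction:
 *
 *	VERIFY0(zfs_acl_ids_create(dzp, 0, &vattr, cr, NULL, &acl_ids,
 *	    NULL));
 *	zfs_mknode(dzp, &vattr, tx, cr, 0, &zp, &acl_ids);
 *	zfs_acl_ids_free(&acl_ids);
 */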
/*
 * Update in-core attributes.  It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
    xoptattr_t *xoap;

    xoap = xva_getxoptattr(xvap);
    ASSERT3P(xoap, !=, NULL);

    if (zp->z_zfsvfs->z_replay == B_FALSE) {
        ASSERT_VOP_IN_SEQC(ZTOV(zp));
    }

    if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
        uint64_t times[2];
        ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
        (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs),
            &times, sizeof (times), tx);
        XVA_SET_RTN(xvap, XAT_CREATETIME);
    }
    if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
        ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_READONLY);
    }
    if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
        ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_HIDDEN);
    }
    if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
        ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_SYSTEM);
    }
    if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
        ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_ARCHIVE);
    }
    if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
        ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_IMMUTABLE);
    }
    if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
        ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_NOUNLINK);
    }
    if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
        ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_APPENDONLY);
    }
    if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
        ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_NODUMP);
    }
    if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
        ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_OPAQUE);
    }
    if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
        ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
            xoap->xoa_av_quarantined, zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
    }
    if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
        ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
    }
    if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
        zfs_sa_set_scanstamp(zp, xvap, tx);
        XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
    }
    if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
        ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_REPARSE);
    }
    if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
        ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_OFFLINE);
    }
    if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
        ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
            zp->z_pflags, tx);
        XVA_SET_RTN(xvap, XAT_SPARSE);
    }
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
    dmu_object_info_t doi;
    dmu_buf_t *db;
    znode_t *zp;
    vnode_t *vp;
    sa_handle_t *hdl;
    int locked;
    int err;

    getnewvnode_reserve();
again:
    *zpp = NULL;
    ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

    err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
    if (err) {
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        getnewvnode_drop_reserve();
        return (err);
    }

    dmu_object_info_from_db(db, &doi);
    if (doi.doi_bonus_type != DMU_OT_SA &&
        (doi.doi_bonus_type != DMU_OT_ZNODE ||
        (doi.doi_bonus_type == DMU_OT_ZNODE &&
        doi.doi_bonus_size < sizeof (znode_phys_t)))) {
        sa_buf_rele(db, NULL);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        getnewvnode_drop_reserve();
        return (SET_ERROR(EINVAL));
    }

    hdl = dmu_buf_get_user(db);
    if (hdl != NULL) {
        zp = sa_get_userdata(hdl);

        /*
         * Since "SA" does immediate eviction we
         * should never find a sa handle that doesn't
         * know about the znode.
         */
        ASSERT3P(zp, !=, NULL);
        ASSERT3U(zp->z_id, ==, obj_num);
        if (zp->z_unlinked) {
            err = SET_ERROR(ENOENT);
        } else {
            vp = ZTOV(zp);
            /*
             * Don't let the vnode disappear after
             * ZFS_OBJ_HOLD_EXIT.
             */
            VN_HOLD(vp);
            *zpp = zp;
            err = 0;
        }

        sa_buf_rele(db, NULL);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

        if (err) {
            getnewvnode_drop_reserve();
            return (err);
        }

        locked = VOP_ISLOCKED(vp);
        VI_LOCK(vp);
        if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) {
            /*
             * The vnode is doomed and this thread doesn't
             * hold the exclusive lock on it, so the vnode
             * must be being reclaimed by another thread.
             * Otherwise the doomed vnode is being reclaimed
             * by this thread and zfs_zget is called from
             * ZIL internals.
             */
            VI_UNLOCK(vp);

            /*
             * XXX vrele() locks the vnode when the last reference
             * is dropped.  Although in this case the vnode is
             * doomed / dead and so no inactivation is required,
             * the vnode lock is still acquired.  That could result
             * in a LOR with z_teardown_lock if another thread holds
             * the vnode's lock and tries to take z_teardown_lock.
             * But that is only possible if the other thread performs
             * a ZFS vnode operation on the vnode.  That either
             * should not happen if the vnode is dead or the thread
             * should also have a reference to the vnode and thus
             * our reference is not last.
             */
            vrele(vp);
            goto again;
        }
        VI_UNLOCK(vp);
        getnewvnode_drop_reserve();
        return (err);
    }

    /*
     * Not found create new znode/vnode
     * but only if file exists.
     *
     * There is a small window where zfs_vget() could
     * find this object while a file create is still in
     * progress.  This is checked for in zfs_znode_alloc()
     *
     * if zfs_znode_alloc() fails it will drop the hold on the
     * bonus buffer.
     */
    zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
        doi.doi_bonus_type, NULL);
    if (zp == NULL) {
        err = SET_ERROR(ENOENT);
    } else {
        *zpp = zp;
    }
    if (err == 0) {
        vnode_t *vp = ZTOV(zp);

        err = insmntque(vp, zfsvfs->z_vfs);
        if (err == 0) {
            vp->v_hash = obj_num;
            VOP_UNLOCK(vp);
        } else {
            zp->z_vnode = NULL;
            zfs_znode_dmu_fini(zp);
            zfs_znode_free(zp);
            *zpp = NULL;
        }
    }
    ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
    getnewvnode_drop_reserve();
    return (err);
}
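/*
 * Typical use (sketch, error handling elided): resolve an object number
 * to a referenced znode, then drop the reference when done:
 *
 *	znode_t *zp;
 *
 *	if (zfs_zget(zfsvfs, obj_num, &zp) == 0) {
 *		... operate on ZTOV(zp) ...
 *		vrele(ZTOV(zp));
 *	}
 *
 * On success the vnode carries a reference but is returned unlocked;
 * unlinked files come back as ENOENT.
 */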
int
zfs_rezget(znode_t *zp)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    dmu_object_info_t doi;
    dmu_buf_t *db;
    vnode_t *vp;
    uint64_t obj_num = zp->z_id;
    uint64_t mode, size;
    sa_bulk_attr_t bulk[8];
    int err;
    int count = 0;
    uint64_t gen;

    /*
     * Remove cached pages before reloading the znode, so that they are not
     * lingering after we run into any error.  Ideally, we should vgone()
     * the vnode in case of error, but currently we cannot do that
     * because of the LOR between the vnode lock and z_teardown_lock.
     * So, instead, we have to "doom" the znode in the illumos style.
     *
     * Ignore invalid pages during the scan.  This is to avoid deadlocks
     * between page busying and the teardown lock, as pages are busied prior
     * to a VOP_GETPAGES operation, which acquires the teardown read lock.
     * Such pages will be invalid and can safely be skipped here.
     */
    vp = ZTOV(zp);
#if __FreeBSD_version >= 1400042
    vn_pages_remove_valid(vp, 0, 0);
#else
    vn_pages_remove(vp, 0, 0);
#endif

    ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

    mutex_enter(&zp->z_acl_lock);
    if (zp->z_acl_cached) {
        zfs_acl_free(zp->z_acl_cached);
        zp->z_acl_cached = NULL;
    }
    mutex_exit(&zp->z_acl_lock);

    rw_enter(&zp->z_xattr_lock, RW_WRITER);
    if (zp->z_xattr_cached) {
        nvlist_free(zp->z_xattr_cached);
        zp->z_xattr_cached = NULL;
    }
    rw_exit(&zp->z_xattr_lock);

    ASSERT3P(zp->z_sa_hdl, ==, NULL);
    err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
    if (err) {
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (err);
    }

    dmu_object_info_from_db(db, &doi);
    if (doi.doi_bonus_type != DMU_OT_SA &&
        (doi.doi_bonus_type != DMU_OT_ZNODE ||
        (doi.doi_bonus_type == DMU_OT_ZNODE &&
        doi.doi_bonus_size < sizeof (znode_phys_t)))) {
        sa_buf_rele(db, NULL);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (SET_ERROR(EINVAL));
    }

    zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
    size = zp->z_size;

    /* reload cached values */
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
        &gen, sizeof (gen));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
        &zp->z_size, sizeof (zp->z_size));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
        &zp->z_links, sizeof (zp->z_links));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
        &zp->z_pflags, sizeof (zp->z_pflags));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
        &zp->z_atime, sizeof (zp->z_atime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
        &zp->z_uid, sizeof (zp->z_uid));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
        &zp->z_gid, sizeof (zp->z_gid));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
        &mode, sizeof (mode));

    if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
        zfs_znode_dmu_fini(zp);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (SET_ERROR(EIO));
    }

    zp->z_mode = mode;

    if (gen != zp->z_gen) {
        zfs_znode_dmu_fini(zp);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (SET_ERROR(EIO));
    }

    /*
     * It is highly improbable but still quite possible that two
     * objects in different datasets are created with the same
     * object numbers and in transaction groups with the same
     * numbers.  znodes corresponding to those objects would
     * have the same z_id and z_gen, but their other attributes
     * may be different.
     * zfs recv -F may replace one of such objects with the other.
     * As a result file properties recorded in the replaced
     * object's vnode may no longer match the received object's
     * properties.  At present the only cached property is the
     * file's type recorded in v_type.
     * So, handle this case by leaving the old vnode and znode
     * disassociated from the actual object.  A new vnode and a
     * znode will be created if the object is accessed
     * (e.g. via a look-up).  The old vnode and znode will be
     * recycled when the last vnode reference is dropped.
     */
    if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) {
        zfs_znode_dmu_fini(zp);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (SET_ERROR(EIO));
    }

    /*
     * If the file has zero links, then it has been unlinked on the send
     * side and it must be in the received unlinked set.
     * We call zfs_znode_dmu_fini() now to prevent any accesses to the
     * stale data and to prevent automatic removal of the file in
     * zfs_zinactive().  The file will be removed either when it is removed
     * on the send side and the next incremental stream is received or
     * when the unlinked set gets processed.
     */
    zp->z_unlinked = (zp->z_links == 0);
    if (zp->z_unlinked) {
        zfs_znode_dmu_fini(zp);
        ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
        return (0);
    }

    zp->z_blksz = doi.doi_data_block_size;
    if (zp->z_size != size)
        vnode_pager_setsize(vp, zp->z_size);

    ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

    return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    objset_t *os = zfsvfs->z_os;
    uint64_t obj = zp->z_id;
    uint64_t acl_obj = zfs_external_acl(zp);

    ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
    if (acl_obj) {
        VERIFY(!zp->z_is_sa);
        VERIFY0(dmu_object_free(os, acl_obj, tx));
    }
    VERIFY0(dmu_object_free(os, obj, tx));
    zfs_znode_dmu_fini(zp);
    ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
}
void
zfs_zinactive(znode_t *zp)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    uint64_t z_id = zp->z_id;

    ASSERT3P(zp->z_sa_hdl, !=, NULL);

    /*
     * Don't allow a zfs_zget() while we're trying to release this znode.
     */
    ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);

    /*
     * If this was the last reference to a file with no links, remove
     * the file from the file system unless the file system is mounted
     * read-only.  That can happen, for example, if the file system was
     * originally read-write, the file was opened, then unlinked and
     * the file system was made read-only before the file was finally
     * closed.  The file will remain in the unlinked set.
     */
    if (zp->z_unlinked) {
        ASSERT(!zfsvfs->z_issnap);
        if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0) {
            ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
            zfs_rmnode(zp);
            return;
        }
    }

    zfs_znode_dmu_fini(zp);
    ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
    zfs_znode_free(zp);
}
void
zfs_znode_free(znode_t *zp)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    char *symlink;

    ASSERT3P(zp->z_sa_hdl, ==, NULL);
    mutex_enter(&zfsvfs->z_znodes_lock);
    POINTER_INVALIDATE(&zp->z_zfsvfs);
    list_remove(&zfsvfs->z_all_znodes, zp);
    mutex_exit(&zfsvfs->z_znodes_lock);

    symlink = atomic_load_ptr(&zp->z_cached_symlink);
    if (symlink != NULL) {
        atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
            (uintptr_t)NULL);
        cache_symlink_free(symlink, strlen(symlink) + 1);
    }

    if (zp->z_acl_cached) {
        zfs_acl_free(zp->z_acl_cached);
        zp->z_acl_cached = NULL;
    }

    zfs_znode_free_kmem(zp);
}
void
zfs_tstamp_update_setup_ext(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2], boolean_t have_tx)
{
    timestruc_t now;

    vfs_timestamp(&now);

    if (have_tx) {	/* will sa_bulk_update happen really soon? */
        zp->z_atime_dirty = 0;
    } else {
        zp->z_atime_dirty = 1;
    }

    if (flag & AT_ATIME) {
        ZFS_TIME_ENCODE(&now, zp->z_atime);
    }

    if (flag & AT_MTIME) {
        ZFS_TIME_ENCODE(&now, mtime);
        if (zp->z_zfsvfs->z_use_fuids) {
            zp->z_pflags |= (ZFS_ARCHIVE |
                ZFS_AV_MODIFIED);
        }
    }

    if (flag & AT_CTIME) {
        ZFS_TIME_ENCODE(&now, ctime);
        if (zp->z_zfsvfs->z_use_fuids)
            zp->z_pflags |= ZFS_ARCHIVE;
    }
}
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2])
{
    zfs_tstamp_update_setup_ext(zp, flag, mtime, ctime, B_TRUE);
}
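/*
 * Example (taken from zfs_freesp() below): stamp mtime/ctime as part of
 * a bulk SA update inside an assigned transaction:
 *
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
 *	    mtime, 16);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
 *	    ctime, 16);
 *	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
 *	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 */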
/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file whose block size is to grow.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
    int error;
    u_longlong_t dummy;

    if (size <= zp->z_blksz)
        return;
    /*
     * If the file size is already greater than the current blocksize,
     * we will not grow.  If there is more than one block in a file,
     * the blocksize cannot change.
     */
    if (zp->z_blksz && zp->z_size > zp->z_blksz)
        return;

    error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
        size, 0, tx);

    if (error == ENOTSUP)
        return;
    ASSERT0(error);

    /* What blocksize did we actually get? */
    dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
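/*
 * Example (illustrative, not from the original source): a one-block
 * file with z_blksz == 512 and z_size == 400 can still grow, so
 * zfs_grow_blocksize(zp, 8192, tx) asks the DMU for an 8K block and
 * then reads back the size actually granted.  Once the file spans more
 * than one block (z_size > z_blksz), the call becomes a no-op.
 */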
/*
 * Increase the file length
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    dmu_tx_t *tx;
    zfs_locked_range_t *lr;
    uint64_t newblksz;
    int error;

    /*
     * We will change zp_size, lock the whole file.
     */
    lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

    /*
     * Nothing to do if file already at desired length.
     */
    if (end <= zp->z_size) {
        zfs_rangelock_exit(lr);
        return (0);
    }
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);
    if (end > zp->z_blksz &&
        (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
        /*
         * We are growing the file past the current block size.
         */
        if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
            /*
             * File's blocksize is already larger than the
             * "recordsize" property.  Only let it grow to
             * the next power of 2.
             */
            ASSERT(!ISP2(zp->z_blksz));
            newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
        } else {
            newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
        }
        dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
    } else {
        newblksz = 0;
    }

    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_rangelock_exit(lr);
        return (error);
    }

    if (newblksz)
        zfs_grow_blocksize(zp, newblksz, tx);

    zp->z_size = end;

    VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
        &zp->z_size, sizeof (zp->z_size), tx));

    vnode_pager_setsize(ZTOV(zp), end);

    zfs_rangelock_exit(lr);

    dmu_tx_commit(tx);

    return (0);
}
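/*
 * Illustrative newblksz cases (not from the original source): with
 * z_blksz == 16K, recordsize 128K and end == 40K, the file still has a
 * single block, so newblksz = MIN(40K, 128K) = 40K.  With an existing
 * non-power-of-2 z_blksz of 24K above a 16K recordsize, growth is
 * capped at the next power of 2:
 * newblksz = MIN(end, 1 << highbit64(24K)) == 32K.
 */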
/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    zfs_locked_range_t *lr;
    int error;

    /*
     * Lock the range being freed.
     */
    lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);

    /*
     * Nothing to do if file already at desired length.
     */
    if (off >= zp->z_size) {
        zfs_rangelock_exit(lr);
        return (0);
    }

    if (off + len > zp->z_size)
        len = zp->z_size - off;

    error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);

    if (error == 0) {
#if __FreeBSD_version >= 1400032
        vnode_pager_purge_range(ZTOV(zp), off, off + len);
#else
        /*
         * Before __FreeBSD_version 1400032 we cannot free a block in
         * the middle of a file, but only at the end of a file, so this
         * code path should never happen.
         */
        vnode_pager_setsize(ZTOV(zp), off);
#endif
    }

    zfs_rangelock_exit(lr);

    return (error);
}
/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to truncate.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    vnode_t *vp = ZTOV(zp);
    dmu_tx_t *tx;
    zfs_locked_range_t *lr;
    int error;
    sa_bulk_attr_t bulk[2];
    int count = 0;

    /*
     * We will change zp_size, lock the whole file.
     */
    lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

    /*
     * Nothing to do if file already at desired length.
     */
    if (end >= zp->z_size) {
        zfs_rangelock_exit(lr);
        return (0);
    }

    error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
        DMU_OBJECT_END);
    if (error) {
        zfs_rangelock_exit(lr);
        return (error);
    }
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);
    dmu_tx_mark_netfree(tx);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_rangelock_exit(lr);
        return (error);
    }

    zp->z_size = end;
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
        NULL, &zp->z_size, sizeof (zp->z_size));

    if (end == 0) {
        zp->z_pflags &= ~ZFS_SPARSE;
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
            NULL, &zp->z_pflags, 8);
    }
    VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));

    dmu_tx_commit(tx);

    /*
     * Clear any mapped pages in the truncated region.  This has to
     * happen outside of the transaction to avoid the possibility of
     * a deadlock with someone trying to push a page that we are
     * about to invalidate.
     */
    vnode_pager_setsize(vp, end);

    zfs_rangelock_exit(lr);

    return (0);
}
/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- length of range (0 => to EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
    dmu_tx_t *tx;
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    zilog_t *zilog = zfsvfs->z_log;
    uint64_t mode;
    uint64_t mtime[2], ctime[2];
    sa_bulk_attr_t bulk[3];
    int count = 0;
    int error;

    if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
        sizeof (mode))) != 0)
        return (error);

    if (off > zp->z_size) {
        error = zfs_extend(zp, off+len);
        if (error == 0 && log)
            goto log;
        else
            return (error);
    }

    if (len == 0) {
        error = zfs_trunc(zp, off);
    } else {
        if ((error = zfs_free_range(zp, off, len)) == 0 &&
            off + len > zp->z_size)
            error = zfs_extend(zp, off+len);
    }
    if (error || !log)
        return (error);
log:
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
        NULL, &zp->z_pflags, 8);
    zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
    error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

    zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

    dmu_tx_commit(tx);
    return (error);
}
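/*
 * Dispatch summary (restating the logic above): zfs_freesp(zp, off,
 * len, flag, log) behaves as
 *
 *	off > z_size            -> zfs_extend(zp, off + len)
 *	len == 0                -> zfs_trunc(zp, off)
 *	otherwise               -> zfs_free_range(zp, off, len), then
 *	                           zfs_extend(zp, off + len) if the
 *	                           range ends past EOF
 *
 * followed, when log is TRUE, by a TX_TRUNCATE ZIL record and an
 * mtime/ctime update.
 */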
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
    uint64_t moid, obj, sa_obj, version;
    uint64_t sense = ZFS_CASE_SENSITIVE;
    uint64_t norm = 0;
    nvpair_t *elem;
    int error;
    int i;
    znode_t *rootzp = NULL;
    zfsvfs_t *zfsvfs;
    vattr_t vattr;
    znode_t *zp;
    zfs_acl_ids_t acl_ids;

    /*
     * First attempt to create master node.
     */
    /*
     * In an empty objset, there are no blocks to read and thus
     * there can be no i/o errors (which we assert below).
     */
    moid = MASTER_NODE_OBJ;
    error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
        DMU_OT_NONE, 0, tx);
    ASSERT0(error);

    /*
     * Set starting attributes.
     */
    version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
    elem = NULL;
    while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
        /* For the moment we expect all zpl props to be uint64_ts */
        uint64_t val;
        const char *name;

        ASSERT3S(nvpair_type(elem), ==, DATA_TYPE_UINT64);
        val = fnvpair_value_uint64(elem);
        name = nvpair_name(elem);
        if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
            if (val < version)
                version = val;
        } else {
            error = zap_update(os, moid, name, 8, 1, &val, tx);
        }
        ASSERT0(error);
        if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
            norm = val;
        else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
            sense = val;
    }
    ASSERT3U(version, !=, 0);
    error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
    ASSERT0(error);

    /*
     * Create zap object used for SA attribute registration
     */
    if (version >= ZPL_VERSION_SA) {
        sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
            DMU_OT_NONE, 0, tx);
        error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
        ASSERT0(error);
    } else {
        sa_obj = 0;
    }
    /*
     * Create a delete queue.
     */
    obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

    error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
    ASSERT0(error);

    /*
     * Create root znode.  Create minimal znode/vnode/zfsvfs
     * to allow zfs_mknode to work.
     */
    vattr.va_mask = AT_MODE|AT_UID|AT_GID;
    vattr.va_type = VDIR;
    vattr.va_mode = S_IFDIR|0755;
    vattr.va_uid = crgetuid(cr);
    vattr.va_gid = crgetgid(cr);

    zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

    rootzp = zfs_znode_alloc_kmem(KM_SLEEP);
    ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
    rootzp->z_unlinked = 0;
    rootzp->z_atime_dirty = 0;
    rootzp->z_is_sa = USE_SA(version, os);

    zfsvfs->z_os = os;
    zfsvfs->z_parent = zfsvfs;
    zfsvfs->z_version = version;
    zfsvfs->z_use_fuids = USE_FUIDS(version, os);
    zfsvfs->z_use_sa = USE_SA(version, os);
    zfsvfs->z_norm = norm;

    error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
        &zfsvfs->z_attr_table);
    ASSERT0(error);

    /*
     * Fold case on file systems that are always or sometimes case
     * insensitive.
     */
    if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
        zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

    mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
    list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
        offsetof(znode_t, z_link_node));

    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

    rootzp->z_zfsvfs = zfsvfs;
    VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
        cr, NULL, &acl_ids, NULL));
    zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
    ASSERT3P(zp, ==, rootzp);
    error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
    ASSERT0(error);
    zfs_acl_ids_free(&acl_ids);
    POINTER_INVALIDATE(&rootzp->z_zfsvfs);

    sa_handle_destroy(rootzp->z_sa_hdl);
    zfs_znode_free_kmem(rootzp);

    /*
     * Create shares directory
     */
    error = zfs_create_share_dir(zfsvfs, tx);
    ASSERT0(error);

    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_destroy(&zfsvfs->z_hold_mtx[i]);

    kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
void
zfs_znode_update_vfs(znode_t *zp)
{
    vm_object_t object;

    if ((object = ZTOV(zp)->v_object) == NULL ||
        zp->z_size == object->un_pager.vnp.vnp_size)
        return;

    vnode_pager_setsize(ZTOV(zp), zp->z_size);
}
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf,
    uint64_t buflen)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    uint64_t parent;
    int is_xattrdir;
    int err;

    /* Extended attributes should not be visible as regular files. */
    if ((zp->z_pflags & ZFS_XATTR) != 0)
        return (SET_ERROR(EINVAL));

    err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
        &parent, &is_xattrdir);
    if (err != 0)
        return (err);
    ASSERT0(is_xattrdir);

    /* No name as this is a root object. */
    if (parent == zp->z_id)
        return (SET_ERROR(EINVAL));

    err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
        ZFS_DIRENT_OBJ(-1ULL), buf, buflen);
    if (err != 0)
        return (err);
    err = zfs_zget(zfsvfs, parent, dzpp);
    return (err);
}
int
zfs_rlimit_fsize(off_t fsize)
{
    struct thread *td = curthread;
    off_t lim;

    if (td == NULL)
        return (0);

    lim = lim_cur(td, RLIMIT_FSIZE);
    if (__predict_true((uoff_t)fsize <= lim))
        return (0);

    /*
     * The limit is reached.
     */
    PROC_LOCK(td->td_proc);
    kern_psignal(td->td_proc, SIGXFSZ);
    PROC_UNLOCK(td->td_proc);
    return (EFBIG);
}
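/*
 * Example (illustrative): with RLIMIT_FSIZE set to 1 MB, a caller
 * checking a prospective 2 MB end-of-write does
 *
 *	if ((error = zfs_rlimit_fsize(2 * 1024 * 1024)) != 0)
 *		return (error);
 *
 * which posts SIGXFSZ to the process and returns EFBIG; sizes at or
 * under the limit return 0 without side effects.
 */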