/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vnode.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_comutil.h"
/*
 * Functions needed for userland (ie: libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;

/*
 * This is used by the test suite so that it can delay znodes from being
 * freed in order to inspect the unlinked set.
 */
static int zfs_unlink_suspend_progress = 0;
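
/*
 * Illustrative note (not code from this file): the parameter is exposed
 * through module_param() at the bottom of this file, so a test running
 * against a loaded zfs.ko can toggle it at runtime:
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_unlink_suspend_progress
 *
 * While it is nonzero, zfs_zinactive() below leaves unlinked znodes in
 * the unlinked set instead of freeing them.
 */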
/*
 * This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
 * z_rangelock. It will modify the offset and length of the lock to reflect
 * znode-specific information, and convert RL_APPEND to RL_WRITER. This is
 * called with the rangelock_t's rl_lock held, which avoids races.
 */
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
	znode_t *zp = arg;

	/*
	 * If in append mode, convert to writer and lock starting at the
	 * current end of file.
	 */
	if (new->lr_type == RL_APPEND) {
		new->lr_offset = zp->z_size;
		new->lr_type = RL_WRITER;
	}

	/*
	 * If we need to grow the block size then lock the whole file range.
	 */
	uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
	if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
	    zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
		new->lr_offset = 0;
		new->lr_length = UINT64_MAX;
	}
}
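
/*
 * Usage sketch (hypothetical caller, not part of this file): an appending
 * writer takes an RL_APPEND lock and relies on the callback above to pin
 * the locked range at the current end of file:
 *
 *	zfs_locked_range_t *lr;
 *
 *	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
 *	// the callback has run: lr->lr_type is now RL_WRITER and
 *	// lr->lr_offset was set to zp->z_size when the lock was taken
 *	... perform the write at lr->lr_offset ...
 *	zfs_rangelock_exit(lr);
 */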
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	(void) arg, (void) kmflags;
	znode_t *zp = buf;

	inode_init_once(ZTOI(zp));
	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

	zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);

	zp->z_dirlocks = NULL;
	zp->z_acl_cached = NULL;
	zp->z_xattr_cached = NULL;
	zp->z_xattr_parent = 0;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;

	return (0);
}
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	(void) arg;
	znode_t *zp = buf;

	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	zfs_rangelock_fini(&zp->z_rangelock);

	ASSERT3P(zp->z_dirlocks, ==, NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);

	ASSERT0(atomic_load_32(&zp->z_sync_writes_cnt));
	ASSERT0(atomic_load_32(&zp->z_async_writes_cnt));
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
	(void) arg, (void) kmflags;
	znode_hold_t *zh = buf;

	mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
	zfs_refcount_create(&zh->zh_refcount);
	zh->zh_obj = ZFS_NO_OBJECT;

	return (0);
}

static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
	(void) arg;
	znode_hold_t *zh = buf;

	mutex_destroy(&zh->zh_lock);
	zfs_refcount_destroy(&zh->zh_refcount);
}
void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache.  The KMC_SLAB hint is used in order that it be
	 * backed by kmalloc() when on the Linux slab in order that any
	 * wait_on_bit() operations on the related inode operate properly.
	 */
	ASSERT(znode_cache == NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);

	ASSERT(znode_hold_cache == NULL);
	znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
	    sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
	    zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}

void
zfs_znode_fini(void)
{
	/*
	 * Cleanup zcache
	 */
	if (znode_cache)
		kmem_cache_destroy(znode_cache);
	znode_cache = NULL;

	if (znode_hold_cache)
		kmem_cache_destroy(znode_hold_cache);
	znode_hold_cache = NULL;
}
/*
 * The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
 * serialize access to a znode and its SA buffer while the object is being
 * created or destroyed.  This kind of locking would normally reside in the
 * znode itself but in this case that's impossible because the znode and SA
 * buffer may not yet exist.  Therefore the locking is handled externally
 * with an array of mutexes and AVL trees which contain per-object locks.
 *
 * In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
 * in to the correct AVL tree and finally the per-object lock is held.  In
 * zfs_znode_hold_exit() the process is reversed.  The per-object lock is
 * released, removed from the AVL tree and destroyed if there are no waiters.
 *
 * This scheme has two important properties:
 *
 * 1) No memory allocations are performed while holding one of the
 *    z_hold_locks.  This ensures evict(), which can be called from direct
 *    memory reclaim, will never block waiting on a z_hold_locks which just
 *    happens to have hashed to the same index.
 *
 * 2) All locks used to serialize access to an object are per-object and
 *    never shared.  This minimizes lock contention without creating a
 *    large number of dedicated locks.
 *
 * On the downside it does require znode_hold_t structures to be frequently
 * allocated and freed.  However, because these are backed by a kmem cache
 * and very short lived this cost is minimal.
 */
static int
zfs_znode_hold_compare(const void *a, const void *b)
{
	const znode_hold_t *zh_a = (const znode_hold_t *)a;
	const znode_hold_t *zh_b = (const znode_hold_t *)b;

	return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
	znode_hold_t *zh, search;
	int i = ZFS_OBJ_HASH(zfsvfs, obj);
	boolean_t held;

	search.zh_obj = obj;

	mutex_enter(&zfsvfs->z_hold_locks[i]);
	zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
	held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
	mutex_exit(&zfsvfs->z_hold_locks[i]);

	return (held);
}
static znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
	znode_hold_t *zh, *zh_new, search;
	int i = ZFS_OBJ_HASH(zfsvfs, obj);
	boolean_t found = B_FALSE;

	zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
	zh_new->zh_obj = obj;
	search.zh_obj = obj;

	mutex_enter(&zfsvfs->z_hold_locks[i]);
	zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
	if (likely(zh == NULL)) {
		zh = zh_new;
		avl_add(&zfsvfs->z_hold_trees[i], zh);
	} else {
		ASSERT3U(zh->zh_obj, ==, obj);
		found = B_TRUE;
	}
	zfs_refcount_add(&zh->zh_refcount, NULL);
	mutex_exit(&zfsvfs->z_hold_locks[i]);

	if (found == B_TRUE)
		kmem_cache_free(znode_hold_cache, zh_new);

	ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
	ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
	mutex_enter(&zh->zh_lock);

	return (zh);
}
static void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
	int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
	boolean_t remove = B_FALSE;

	ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
	ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
	mutex_exit(&zh->zh_lock);

	mutex_enter(&zfsvfs->z_hold_locks[i]);
	if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
		avl_remove(&zfsvfs->z_hold_trees[i], zh);
		remove = B_TRUE;
	}
	mutex_exit(&zfsvfs->z_hold_locks[i]);

	if (remove == B_TRUE)
		kmem_cache_free(znode_hold_cache, zh);
}
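
/*
 * Typical usage, mirroring the callers below (zfs_mknode(), zfs_zget(),
 * zfs_rezget(), zfs_znode_delete() and zfs_zinactive()): bracket all
 * creation or destruction of a znode/SA buffer with an enter/exit pair
 * keyed on the object number:
 *
 *	znode_hold_t *zh = zfs_znode_hold_enter(zfsvfs, obj);
 *	... create, look up, or tear down the znode and its SA handle ...
 *	zfs_znode_hold_exit(zfsvfs, zh);
 */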
uint64_t
zfs_cmpldev(uint64_t dev)
{
	return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(zfs_znode_held(zfsvfs, zp->z_id));

	mutex_enter(&zp->z_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	if (sa_hdl == NULL) {
		VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) || zp->z_unlinked ||
	    RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}
/*
 * Called by new_inode() to allocate a new inode.
 */
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
	znode_t *zp;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	*ip = ZTOI(zp);

	return (0);
}
/*
 * Called in multiple places when an inode should be destroyed.
 */
void
zfs_inode_destroy(struct inode *ip)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	mutex_enter(&zfsvfs->z_znodes_lock);
	if (list_link_active(&zp->z_link_node)) {
		list_remove(&zfsvfs->z_all_znodes, zp);
		zfsvfs->z_nr_znodes--;
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
	uint64_t rdev = 0;

	switch (ip->i_mode & S_IFMT) {
	case S_IFREG:
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;

	case S_IFDIR:
		ip->i_op = &zpl_dir_inode_operations;
		ip->i_fop = &zpl_dir_file_operations;
		ITOZ(ip)->z_zn_prefetch = B_TRUE;
		break;

	case S_IFLNK:
		ip->i_op = &zpl_symlink_inode_operations;
		break;

	/*
	 * rdev is only stored in a SA for device files.
	 */
	case S_IFCHR:
	case S_IFBLK:
		(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
		    sizeof (rdev));
		zfs_fallthrough;
	case S_IFIFO:
	case S_IFSOCK:
		init_special_inode(ip, ip->i_mode, rdev);
		ip->i_op = &zpl_special_inode_operations;
		break;

	default:
		zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
		    (u_longlong_t)ip->i_ino, ip->i_mode);

		/* Assume the inode is a file and attempt to continue */
		ip->i_mode = S_IFREG | 0644;
		ip->i_op = &zpl_inode_operations;
		ip->i_fop = &zpl_file_operations;
		ip->i_mapping->a_ops = &zpl_address_space_operations;
		break;
	}
}
void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
	/*
	 * Linux and Solaris have different sets of file attributes, so we
	 * restrict this conversion to the intersection of the two.
	 */
#ifdef HAVE_INODE_SET_FLAGS
	unsigned int flags = 0;
	if (zp->z_pflags & ZFS_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (zp->z_pflags & ZFS_APPENDONLY)
		flags |= S_APPEND;

	inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
	if (zp->z_pflags & ZFS_IMMUTABLE)
		ip->i_flags |= S_IMMUTABLE;
	else
		ip->i_flags &= ~S_IMMUTABLE;

	if (zp->z_pflags & ZFS_APPENDONLY)
		ip->i_flags |= S_APPEND;
	else
		ip->i_flags &= ~S_APPEND;
#endif
}
/*
 * Update the embedded inode given the znode.
 */
void
zfs_znode_update_vfs(znode_t *zp)
{
	struct inode	*ip;
	uint32_t	blksize;
	u_longlong_t	i_blocks;

	ASSERT(zp != NULL);
	ip = ZTOI(zp);

	/* Skip .zfs control nodes which do not exist on disk. */
	if (zfsctl_is_node(ip))
		return;

	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);

	spin_lock(&ip->i_lock);
	ip->i_mode = zp->z_mode;
	ip->i_blocks = i_blocks;
	i_size_write(ip, zp->z_size);
	spin_unlock(&ip->i_lock);
}
/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user() that is
 * up to the caller to do, in case you don't want to
 * return the znode after all.
 */
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	uint64_t tmp_gen;
	uint64_t links;
	uint64_t z_uid, z_gid;
	uint64_t atime[2], mtime[2], ctime[2], btime[2];
	uint64_t projid = ZFS_DEFAULT_PROJID;
	sa_bulk_attr_t bulk[12];
	int count = 0;

	ASSERT(zfsvfs != NULL);

	ip = new_inode(zfsvfs->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	zp->z_unlinked = B_FALSE;
	zp->z_atime_dirty = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;
	zp->z_suspended = B_FALSE;
	zp->z_sa_hdl = NULL;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_writes_cnt = 0;
	zp->z_async_writes_cnt = 0;

	zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
	    (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    (zp->z_pflags & ZFS_PROJID) &&
	    sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		zp->z_sa_hdl = NULL;
		goto error;
	}

	zp->z_projid = projid;
	zp->z_mode = ip->i_mode = mode;
	ip->i_generation = (uint32_t)tmp_gen;
	ip->i_blkbits = SPA_MINBLOCKSHIFT;
	set_nlink(ip, (uint32_t)links);
	zfs_uid_write(ip, z_uid);
	zfs_gid_write(ip, z_gid);
	zfs_set_inode_flags(zp, ip);

	/* Cache the xattr parent id */
	if (zp->z_pflags & ZFS_XATTR)
		zp->z_xattr_parent = parent;

	ZFS_TIME_DECODE(&ip->i_atime, atime);
	ZFS_TIME_DECODE(&ip->i_mtime, mtime);
	ZFS_TIME_DECODE(&ip->i_ctime, ctime);
	ZFS_TIME_DECODE(&zp->z_btime, btime);

	ip->i_ino = zp->z_id;
	zfs_znode_update_vfs(zp);
	zfs_inode_set_ops(zfsvfs, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block.  This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * Exceptions include rolling back a mounted file system, either
	 * from the zfs rollback or zfs recv command.
	 *
	 * Active inodes are unhashed during the rollback, but since zrele
	 * can happen asynchronously, we can't guarantee they've been
	 * unhashed.  This can cause hash collisions in unlinked drain
	 * processing so do not hash unlinked znodes.
	 */
	if (links > 0)
		VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	zfsvfs->z_nr_znodes++;
	mutex_exit(&zfsvfs->z_znodes_lock);

	if (links > 0)
		unlock_new_inode(ip);
	return (zp);

error:
	iput(ip);
	return (NULL);
}
/*
 * Safely mark an inode dirty.  Inodes which are part of a read-only
 * file system or snapshot may not be dirtied.
 */
void
zfs_mark_inode_dirty(struct inode *ip)
{
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
		return;

	mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_TMPFILE	- new object is of O_TMPFILE
 *			  IS_XATTR	- new object is an attribute
 *		acl_ids	- ACL related attributes
 *
 *	OUT:	zpp	- allocated znode (set to dzp if IS_ROOT_NODE)
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t	crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t	mode, size, links, parent, pflags;
	uint64_t	projid = ZFS_DEFAULT_PROJID;
	uint64_t	rdev = 0;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	dmu_buf_t	*db;
	inode_timespec_t now;
	uint64_t	gen, obj;
	int		bonuslen;
	int		dnodesize;
	sa_handle_t	*sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t	*sa_attrs;
	int		cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };
	znode_hold_t	*zh;

	if (zfsvfs->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
		dnodesize = vap->va_fsid;	/* ditto */
	} else {
		obj = 0;
		gethrestime(&now);
		gen = dmu_tx_get_txg(tx);
		dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
	}

	if (dnodesize == 0)
		dnodesize = DNODE_MIN_SIZE;

	obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;

	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 */
	/*
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (S_ISDIR(vap->va_mode)) {
		if (zfsvfs->z_replay) {
			VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = zap_create_norm_dnsize(zfsvfs->z_os,
			    zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, dnodesize, tx);
		}
	} else {
		if (zfsvfs->z_replay) {
			VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx));
		} else {
			obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, dnodesize, tx);
		}
	}

	zh = zfs_znode_hold_enter(zfsvfs, obj);
	VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp->z_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zfsvfs->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (S_ISDIR(vap->va_mode)) {
		size = 2;		/* contents ("." and "..") */
		links = 2;
	} else {
		size = 0;
		links = (flag & IS_TMPFILE) ? 0 : 1;
	}

	if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
		rdev = vap->va_rdev;

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
		/*
		 * With ZFS_PROJID flag, we can easily know whether there is
		 * project ID stored on disk or not. See zfs_space_delta_cb().
		 */
		if (obj_type != DMU_OT_ZNODE &&
		    dmu_objset_projectquota_enabled(zfsvfs->z_os))
			pflags |= ZFS_PROJID;

		/*
		 * Inherit project ID from parent if required.
		 */
		projid = zfs_inherit_projid(dzp);
		if (dzp->z_pflags & ZFS_PROJINHERIT)
			pflags |= ZFS_PROJINHERIT;
	}

	/*
	 * The ZFS_NO_EXECS_DENIED flag will be determined when
	 * zfs_mode_compute() is called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Setup the array of attributes to be replaced/set on the new file
	 *
	 * order for DMU_OT_ZNODE is critical since it needs to be constructed
	 * in the old znode_phys_t format.  Don't change this ordering
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
		    &empty_xattr, 8);
	} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    pflags & ZFS_PROJID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
		    NULL, &projid, 8);
	}

	if (obj_type == DMU_OT_ZNODE ||
	    (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
		    NULL, &rdev, 8);
	}

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);

	if (!(flag & IS_ROOT_NODE)) {
		/*
		 * The call to zfs_znode_alloc() may fail if memory is low
		 * via the call path: alloc_inode() -> inode_init_always() ->
		 * security_inode_alloc() -> inode_alloc_security().  Since
		 * the existing code is written such that zfs_mknode() can
		 * not fail retry until sufficient memory has been reclaimed.
		 */
		do {
			*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
		} while (*zpp == NULL);

		VERIFY(*zpp != NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
	(*zpp)->z_dnodesize = dnodesize;
	(*zpp)->z_projid = projid;

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	zfs_znode_hold_exit(zfsvfs, zh);
}
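
/*
 * A sketch of a typical caller (simplified from zfs_create(); transaction
 * holds and error handling omitted): the transaction is assigned before
 * zfs_mknode() is called and committed afterwards, and zfs_mknode()
 * itself cannot fail:
 *
 *	zfs_acl_ids_t acl_ids;
 *	znode_t *zp;
 *	dmu_tx_t *tx;
 *
 *	VERIFY0(zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, &acl_ids));
 *	tx = dmu_tx_create(zfsvfs->z_os);
 *	... dmu_tx_hold_*() calls and dmu_tx_assign(tx, TXG_WAIT) ...
 *	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
 *	dmu_tx_commit(tx);
 *	zfs_acl_ids_free(&acl_ids);
 */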
/*
 * Update in-core attributes.  It is assumed the caller will be doing an
 * sa_bulk_update to push the changes out.
 */
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	xoptattr_t *xoap;
	boolean_t update_inode = B_FALSE;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		uint64_t times[2];
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
		    &times, sizeof (times), tx);
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}
	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
		ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_IMMUTABLE);

		update_inode = B_TRUE;
	}
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
		ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NOUNLINK);
	}
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
		ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_APPENDONLY);

		update_inode = B_TRUE;
	}
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
		ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_NODUMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
		ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OPAQUE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
		    xoap->xoa_av_quarantined, zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
		ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
	}
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		zfs_sa_set_scanstamp(zp, xvap, tx);
		XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_REPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
		ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_OFFLINE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
		ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_SPARSE);
	}
	if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
		ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
		    zp->z_pflags, tx);
		XVA_SET_RTN(xvap, XAT_PROJINHERIT);
	}

	if (update_inode)
		zfs_set_inode_flags(zp, ZTOI(zp));
}
*zfsvfs
, uint64_t obj_num
, znode_t
**zpp
)
1071 dmu_object_info_t doi
;
1081 zh
= zfs_znode_hold_enter(zfsvfs
, obj_num
);
1083 err
= sa_buf_hold(zfsvfs
->z_os
, obj_num
, NULL
, &db
);
1085 zfs_znode_hold_exit(zfsvfs
, zh
);
1089 dmu_object_info_from_db(db
, &doi
);
1090 if (doi
.doi_bonus_type
!= DMU_OT_SA
&&
1091 (doi
.doi_bonus_type
!= DMU_OT_ZNODE
||
1092 (doi
.doi_bonus_type
== DMU_OT_ZNODE
&&
1093 doi
.doi_bonus_size
< sizeof (znode_phys_t
)))) {
1094 sa_buf_rele(db
, NULL
);
1095 zfs_znode_hold_exit(zfsvfs
, zh
);
1096 return (SET_ERROR(EINVAL
));
1099 hdl
= dmu_buf_get_user(db
);
1101 zp
= sa_get_userdata(hdl
);
1105 * Since "SA" does immediate eviction we
1106 * should never find a sa handle that doesn't
1107 * know about the znode.
1110 ASSERT3P(zp
, !=, NULL
);
1112 mutex_enter(&zp
->z_lock
);
1113 ASSERT3U(zp
->z_id
, ==, obj_num
);
1115 * If zp->z_unlinked is set, the znode is already marked
1116 * for deletion and should not be discovered. Check this
1117 * after checking igrab() due to fsetxattr() & O_TMPFILE.
1119 * If igrab() returns NULL the VFS has independently
1120 * determined the inode should be evicted and has
1121 * called iput_final() to start the eviction process.
1122 * The SA handle is still valid but because the VFS
1123 * requires that the eviction succeed we must drop
1124 * our locks and references to allow the eviction to
1125 * complete. The zfs_zget() may then be retried.
1127 * This unlikely case could be optimized by registering
1128 * a sops->drop_inode() callback. The callback would
1129 * need to detect the active SA hold thereby informing
1130 * the VFS that this inode should not be evicted.
1132 if (igrab(ZTOI(zp
)) == NULL
) {
1134 err
= SET_ERROR(ENOENT
);
1136 err
= SET_ERROR(EAGAIN
);
1142 mutex_exit(&zp
->z_lock
);
1143 sa_buf_rele(db
, NULL
);
1144 zfs_znode_hold_exit(zfsvfs
, zh
);
1146 if (err
== EAGAIN
) {
1147 /* inode might need this to finish evict */
1155 * Not found create new znode/vnode but only if file exists.
1157 * There is a small window where zfs_vget() could
1158 * find this object while a file create is still in
1159 * progress. This is checked for in zfs_znode_alloc()
1161 * if zfs_znode_alloc() fails it will drop the hold on the
1164 zp
= zfs_znode_alloc(zfsvfs
, db
, doi
.doi_data_block_size
,
1165 doi
.doi_bonus_type
, NULL
);
1167 err
= SET_ERROR(ENOENT
);
1171 zfs_znode_hold_exit(zfsvfs
, zh
);
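
/*
 * Example usage (hypothetical, mirroring callers such as unlinked-drain
 * processing): look up a znode by object number and release it with
 * zrele() when done:
 *
 *	znode_t *zp;
 *
 *	if (zfs_zget(zfsvfs, obj_num, &zp) == 0) {
 *		... use zp ...
 *		zrele(zp);
 *	}
 */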
int
zfs_rezget(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	uint64_t links;
	sa_bulk_attr_t bulk[11];
	int err;
	int count = 0;
	uint64_t gen;
	uint64_t z_uid, z_gid;
	uint64_t atime[2], mtime[2], ctime[2], btime[2];
	uint64_t projid = ZFS_DEFAULT_PROJID;
	znode_hold_t *zh;

	/*
	 * skip ctldir, otherwise they will always get invalidated. This will
	 * cause funny behaviour for the mounted snapdirs. Especially for
	 * Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
	 * anyone from automounting it again as long as someone is still
	 * using the detached mount.
	 */
	if (zp->z_is_ctldir)
		return (0);

	zh = zfs_znode_hold_enter(zfsvfs, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		zfs_znode_hold_exit(zfsvfs, zh);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		zfs_znode_hold_exit(zfsvfs, zh);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &links, sizeof (links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &z_uid, sizeof (z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &z_gid, sizeof (z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
	    &mode, sizeof (mode));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		zfs_znode_hold_exit(zfsvfs, zh);
		return (SET_ERROR(EIO));
	}

	if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
		    &projid, 8);
		if (err != 0 && err != ENOENT) {
			zfs_znode_dmu_fini(zp);
			zfs_znode_hold_exit(zfsvfs, zh);
			return (SET_ERROR(err));
		}
	}

	zp->z_projid = projid;
	zp->z_mode = ZTOI(zp)->i_mode = mode;
	zfs_uid_write(ZTOI(zp), z_uid);
	zfs_gid_write(ZTOI(zp), z_gid);

	ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
	ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
	ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
	ZFS_TIME_DECODE(&zp->z_btime, btime);

	if ((uint32_t)gen != ZTOI(zp)->i_generation) {
		zfs_znode_dmu_fini(zp);
		zfs_znode_hold_exit(zfsvfs, zh);
		return (SET_ERROR(EIO));
	}

	set_nlink(ZTOI(zp), (uint32_t)links);
	zfs_set_inode_flags(zp, ZTOI(zp));

	zp->z_blksz = doi.doi_data_block_size;
	zp->z_atime_dirty = B_FALSE;
	zfs_znode_update_vfs(zp);

	/*
	 * If the file has zero links, then it has been unlinked on the send
	 * side and it must be in the received unlinked set.
	 * We call zfs_znode_dmu_fini() now to prevent any accesses to the
	 * stale data and to prevent automatic removal of the file in
	 * zfs_zinactive().  The file will be removed either when it is removed
	 * on the send side and the next incremental stream is received or
	 * when the unlinked set gets processed.
	 */
	zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
	if (zp->z_unlinked)
		zfs_znode_dmu_fini(zp);

	zfs_znode_hold_exit(zfsvfs, zh);

	return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	objset_t *os = zfsvfs->z_os;
	uint64_t obj = zp->z_id;
	uint64_t acl_obj = zfs_external_acl(zp);
	znode_hold_t *zh;

	zh = zfs_znode_hold_enter(zfsvfs, obj);
	if (acl_obj) {
		VERIFY(!zp->z_is_sa);
		VERIFY(0 == dmu_object_free(os, acl_obj, tx));
	}
	VERIFY(0 == dmu_object_free(os, obj, tx));
	zfs_znode_dmu_fini(zp);
	zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	uint64_t z_id = zp->z_id;
	znode_hold_t *zh;

	ASSERT(zp->z_sa_hdl);

	/*
	 * Don't allow a zfs_zget() while we're trying to release this znode.
	 */
	zh = zfs_znode_hold_enter(zfsvfs, z_id);

	mutex_enter(&zp->z_lock);

	/*
	 * If this was the last reference to a file with no links, remove
	 * the file from the file system unless the file system is mounted
	 * read-only.  That can happen, for example, if the file system was
	 * originally read-write, the file was opened, then unlinked and
	 * the file system was made read-only before the file was finally
	 * closed.  The file will remain in the unlinked set.
	 */
	if (zp->z_unlinked) {
		ASSERT(!zfsvfs->z_issnap);
		if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
			mutex_exit(&zp->z_lock);
			zfs_znode_hold_exit(zfsvfs, zh);
			zfs_rmnode(zp);
			return;
		}
	}

	mutex_exit(&zp->z_lock);
	zfs_znode_dmu_fini(zp);

	zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define	zfs_compare_timespec timespec64_compare
#else
#define	zfs_compare_timespec timespec_compare
#endif
/*
 * Determine whether the znode's atime must be updated.  The logic mostly
 * duplicates the Linux kernel's relatime_need_update() functionality.
 * This function is only called if the underlying filesystem actually has
 * atime updates enabled.
 */
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
	inode_timespec_t now;

	gethrestime(&now);
	/*
	 * In relatime mode, only update the atime if the previous atime
	 * is earlier than either the ctime or mtime or if at least a day
	 * has passed since the last update of atime.
	 */
	if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
		return (B_TRUE);

	if (zfs_compare_timespec(&ip->i_ctime, &ip->i_atime) >= 0)
		return (B_TRUE);

	if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
		return (B_TRUE);

	return (B_FALSE);
}
/*
 * Prepare to update znode time stamps.
 *
 *	IN:	zp	- znode requiring timestamp update
 *		flag	- ATTR_MTIME, ATTR_CTIME flags
 *
 *	Note: We don't update atime here, because we rely on Linux VFS to do
 *	atime updating.
 */
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
    uint64_t ctime[2])
{
	inode_timespec_t now;

	gethrestime(&now);

	zp->z_atime_dirty = B_FALSE;

	if (flag & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&now, mtime);
		ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
		if (ZTOZSB(zp)->z_use_fuids) {
			zp->z_pflags |= (ZFS_ARCHIVE |
			    ZFS_AV_MODIFIED);
		}
	}

	if (flag & ATTR_CTIME) {
		ZFS_TIME_ENCODE(&now, ctime);
		ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
		if (ZTOZSB(zp)->z_use_fuids)
			zp->z_pflags |= ZFS_ARCHIVE;
	}
}
/*
 * Grow the block size for a file.
 *
 *	IN:	zp	- znode of file whose block size is to be grown.
 *		size	- requested block size
 *		tx	- open transaction.
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int		error;
	u_longlong_t	dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow.  If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
	    size, 0, tx);

	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
/*
 * Increase the file length
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property.  Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);
	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
	    &zp->z_size, sizeof (zp->z_size), tx));

	zfs_rangelock_exit(lr);

	dmu_tx_commit(tx);

	return (0);
}
/*
 * zfs_zero_partial_page - Modeled after update_pages() but
 * with different arguments and semantics for use by zfs_freesp().
 *
 * Zeroes a piece of a single page cache entry for zp at offset
 * start and length len.
 *
 * Caller must acquire a range lock on the file for the region
 * being zeroed in order that the ARC and page cache stay in sync.
 */
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
	struct address_space *mp = ZTOI(zp)->i_mapping;
	struct page *pp;
	void *pb;
	int64_t	off;

	ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));

	off = start & (PAGE_SIZE - 1);
	start &= PAGE_MASK;

	pp = find_lock_page(mp, start >> PAGE_SHIFT);
	if (pp) {
		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		pb = kmap(pp);
		memset(pb + off, 0, len);
		kunmap(pp);

		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);
		SetPageUptodate(pp);
		unlock_page(pp);
		put_page(pp);
	}
}
/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	zfs_locked_range_t *lr;
	int error;

	/*
	 * Lock the range being freed.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (off >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);

	/*
	 * Zero partial page cache entries.  This must be done under a
	 * range lock in order to keep the ARC and page cache in sync.
	 */
	if (zp->z_is_mapped) {
		loff_t first_page, last_page, page_len;
		loff_t first_page_offset, last_page_offset;

		/* first possible full page in hole */
		first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		/* last page of hole */
		last_page = (off + len) >> PAGE_SHIFT;

		/* offset of first_page */
		first_page_offset = first_page << PAGE_SHIFT;
		/* offset of last_page */
		last_page_offset = last_page << PAGE_SHIFT;

		/* truncate whole pages */
		if (last_page_offset > first_page_offset) {
			truncate_inode_pages_range(ZTOI(zp)->i_mapping,
			    first_page_offset, last_page_offset - 1);
		}

		/* truncate sub-page ranges */
		if (first_page > last_page) {
			/* entire punched area within a single page */
			zfs_zero_partial_page(zp, off, len);
		} else {
			/* beginning of punched area at the end of a page */
			page_len = first_page_offset - off;
			if (page_len > 0)
				zfs_zero_partial_page(zp, off, page_len);

			/* end of punched area at the beginning of a page */
			page_len = off + len - last_page_offset;
			if (page_len > 0)
				zfs_zero_partial_page(zp, last_page_offset,
				    page_len);
		}
	}
	zfs_rangelock_exit(lr);

	return (error);
}
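
/*
 * Worked example (assuming 4 KiB pages): punching off=1000, len=10000
 * gives first_page=1, last_page=2, first_page_offset=4096, and
 * last_page_offset=8192.  The single whole page [4096, 8191] is truncated
 * from the page cache, bytes [1000, 4095] are zeroed at the end of page 0,
 * and bytes [8192, 10999] are zeroed at the start of page 2.
 */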
/*
 * Truncate a file
 *
 *	IN:	zp	- znode of file to truncate.
 *		end	- new end-of-file.
 *
 *	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	zfs_locked_range_t *lr;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_rangelock_exit(lr);
		return (0);
	}

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
	    DMU_OBJECT_END);
	if (error) {
		zfs_rangelock_exit(lr);
		return (error);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_rangelock_exit(lr);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);
	zfs_rangelock_exit(lr);

	return (0);
}
/*
 * Free space in a file
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of range
 *		len	- end of range (0 => EOF)
 *		flag	- current file open mode flags.
 *		log	- TRUE if this action should be logged
 *
 *	RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	zilog_t *zilog = zfsvfs->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
	    sizeof (mode))) != 0)
		return (error);

	if (off > zp->z_size) {
		error = zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		goto out;
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		goto out;
log:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);

	zfs_znode_update_vfs(zp);
	error = 0;

out:
	/*
	 * Truncate the page cache - for file truncate operations, use
	 * the purpose-built API for truncations.  For punching operations,
	 * the truncation is handled under a range lock in zfs_free_range.
	 */
	if (len == 0)
		truncate_setsize(ZTOI(zp), off);
	return (error);
}
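
/*
 * Dispatch summary (illustrative): with a 1000-byte file,
 *	zfs_freesp(zp, 2000, 500, flag, log)  extends the file to 2500 bytes,
 *	zfs_freesp(zp,  200,   0, flag, log)  truncates it to 200 bytes, and
 *	zfs_freesp(zp,  200, 300, flag, log)  punches a hole over [200, 500).
 */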
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	struct super_block *sb;
	zfsvfs_t	*zfsvfs;
	uint64_t	moid, obj, sa_obj, version;
	uint64_t	sense = ZFS_CASE_SENSITIVE;
	uint64_t	norm = 0;
	nvpair_t	*elem;
	int		size;
	int		error;
	int		i;
	znode_t		*rootzp = NULL;
	vattr_t		vattr;
	znode_t		*zp;
	zfs_acl_ids_t	acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		const char *name;

		ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
		VERIFY(nvpair_value_uint64(elem, &val) == 0);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (version < val)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT(error == 0);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT(version != 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
	ASSERT(error == 0);

	/*
	 * Create zap object used for SA attribute registration
	 */
	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT(error == 0);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT(error == 0);

	/*
	 * Create root znode.  Create minimal znode/inode/zfsvfs/sb
	 * to allow zfs_mknode to work.
	 */
	vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	rootzp->z_unlinked = B_FALSE;
	rootzp->z_atime_dirty = B_FALSE;
	rootzp->z_is_sa = USE_SA(version, os);
	rootzp->z_pflags = 0;

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_os = os;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_version = version;
	zfsvfs->z_use_fuids = USE_FUIDS(version, os);
	zfsvfs->z_use_sa = USE_SA(version, os);
	zfsvfs->z_norm = norm;

	sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
	sb->s_fs_info = zfsvfs;

	ZTOI(rootzp)->i_sb = sb;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);
	ASSERT(error == 0);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
	zfsvfs->z_hold_size = size;
	zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
	    KM_SLEEP);
	zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (i = 0; i != size; i++) {
		avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
	}

	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);

	atomic_set(&ZTOI(rootzp)->i_count, 0);
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	for (i = 0; i != size; i++) {
		avl_destroy(&zfsvfs->z_hold_trees[i]);
		mutex_destroy(&zfsvfs->z_hold_locks[i]);
	}

	mutex_destroy(&zfsvfs->z_znodes_lock);

	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
	kmem_free(sb, sizeof (struct super_block));
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, const void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t))) {
		sa_buf_rele(*db, tag);
		return (SET_ERROR(ENOTSUP));
	}

	error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
	if (error != 0) {
		sa_buf_rele(*db, tag);
		return (error);
	}

	return (0);
}

static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, const void *tag)
{
	sa_handle_destroy(hdl);
	sa_buf_rele(db, tag);
}
/*
 * Given an object number, return its parent object number and whether
 * or not the object is an extended attribute directory.
 */
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
    uint64_t *pobjp, int *is_xattrdir)
{
	uint64_t parent;
	uint64_t pflags;
	uint64_t mode;
	uint64_t parent_mode;
	sa_bulk_attr_t bulk[3];
	sa_handle_t *sa_hdl;
	dmu_buf_t *sa_db;
	int count = 0;
	int error;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
	    &parent, sizeof (parent));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
	    &pflags, sizeof (pflags));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &mode, sizeof (mode));

	if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
		return (error);

	/*
	 * When a link is removed its parent pointer is not changed and will
	 * be invalid.  There are two cases where a link is removed but the
	 * file stays around, when it goes to the delete queue and when there
	 * are additional links.
	 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}
/*
 * Given an object number, return some zpl level statistics
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error = 0;

	*path = '\0';
	sa_hdl = hdl;

	uint64_t deleteq_obj;
	VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
	error = zap_lookup_int(osp, deleteq_obj, obj);
	if (error == 0) {
		return (ESTALE);
	} else if (error != ENOENT) {
		return (error);
	}

	for (;;) {
		uint64_t pobj = 0;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir = 0;

		if (prevdb) {
			ASSERT(prevhdl != NULL);
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);
		}

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			strcpy(component + 1, "<xattrdir>");
		} else {
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		memcpy(path, component, complen);
		obj = pobj;

		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);

module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");

module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
	"(debug - leaks space into the unlinked set)");
#endif