/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */

#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/zpl.h>

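/*
 * VFS ->alloc_inode() and ->destroy_inode() hooks.  Allocation and
 * teardown of the inode (znode) are delegated to the common ZFS code.
 */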
static struct inode *
zpl_inode_alloc(struct super_block *sb)
{
	struct inode *ip;

	VERIFY3S(zfs_inode_alloc(sb, &ip), ==, 0);
	inode_set_iversion(ip, 1);

	return (ip);
}

static void
zpl_inode_destroy(struct inode *ip)
{
	ASSERT(atomic_read(&ip->i_count) == 0);
	zfs_inode_destroy(ip);
}

/*
 * Called from __mark_inode_dirty() to reflect that something in the
 * inode has changed.  We use it to ensure the znode system attributes
 * are always strictly up to date with respect to the inode.
 */
#ifdef HAVE_DIRTY_INODE_WITH_FLAGS
static void
zpl_dirty_inode(struct inode *ip, int flags)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, flags);
	spl_fstrans_unmark(cookie);
}
#else
static void
zpl_dirty_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	zfs_dirty_inode(ip, 0);
	spl_fstrans_unmark(cookie);
}
#endif /* HAVE_DIRTY_INODE_WITH_FLAGS */

/*
 * When ->drop_inode() is called its return value indicates if the
 * inode should be evicted from the inode cache.  If the inode is
 * unhashed and has no links the default policy is to evict it
 * immediately.
 *
 * The ->evict_inode() callback must minimally truncate the inode pages,
 * and call clear_inode().  For 2.6.35 and later kernels this will
 * simply update the inode state, with the sync occurring before the
 * truncate in evict().  For earlier kernels clear_inode() maps to
 * end_writeback() which is responsible for completing all outstanding
 * write back.  In either case, once this is done it is safe to cleanup
 * any remaining inode specific data via zfs_inactive().
 */
static void
zpl_evict_inode(struct inode *ip)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	truncate_setsize(ip, 0);
	clear_inode(ip);
	zfs_inactive(ip);
	spl_fstrans_unmark(cookie);
}

static void
zpl_put_super(struct super_block *sb)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_umount(sb);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);
}

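/*
 * Flush dirty data for the entire filesystem.  Per the Linux
 * ->sync_fs() convention, a non-zero wait flag requires the data to be
 * safely on disk before returning.
 */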
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
	fstrans_cookie_t cookie;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_sync(sb, wait, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_statfs(struct dentry *dentry, struct kstatfs *statp)
{
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_statvfs(dentry->d_inode, statp);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	/*
	 * If required by a 32-bit system call, dynamically scale the
	 * block size up to 16MiB and decrease the block counts.  This
	 * allows for a maximum size of 64EiB to be reported.  The file
	 * counts must be artificially capped at 2^32-1.
	 */
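	/*
	 * Illustrative example: with an f_bsize of 128KiB and 2^36 blocks
	 * (8PiB of space), the loop below stops at 2^31 blocks of 4MiB
	 * each, at which point f_blocks fits in 32 bits.
	 */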
	if (unlikely(zpl_is_32bit_api())) {
		while (statp->f_blocks > UINT32_MAX &&
		    statp->f_bsize < SPA_MAXBLOCKSIZE) {
			statp->f_frsize <<= 1;
			statp->f_bsize <<= 1;

			statp->f_blocks >>= 1;
			statp->f_bfree >>= 1;
			statp->f_bavail >>= 1;
		}

		uint64_t usedobjs = statp->f_files - statp->f_ffree;
		statp->f_ffree = MIN(statp->f_ffree, UINT32_MAX - usedobjs);
		statp->f_files = statp->f_ffree + usedobjs;
	}

	return (error);
}

static int
zpl_remount_fs(struct super_block *sb, int *flags, char *data)
{
	zfs_mnt_t zm = { .mnt_osname = NULL, .mnt_data = data };
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_remount(sb, flags, &zm);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

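/*
 * Emit the dataset name reported as the device name in
 * /proc/self/mounts.
 */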
static int
__zpl_show_devname(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
	char *fsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dmu_objset_name(zfsvfs->z_os, fsname);

	for (int i = 0; fsname[i] != 0; i++) {
		/*
		 * Spaces in the dataset name must be converted to their
		 * octal escape sequence for getmntent(3) to correctly
		 * parse the fsname portion of /proc/self/mounts.
		 */
		if (fsname[i] == ' ') {
			seq_puts(seq, "\\040");
		} else {
			seq_putc(seq, fsname[i]);
		}
	}

	kmem_free(fsname, ZFS_MAX_DATASET_NAME_LEN);

	return (0);
}

static int
zpl_show_devname(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_devname(seq, root->d_sb->s_fs_info));
}

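/*
 * Emit the per-filesystem mount options (xattr and ACL type) shown in
 * /proc/self/mounts.
 */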
static int
__zpl_show_options(struct seq_file *seq, zfsvfs_t *zfsvfs)
{
	seq_printf(seq, ",%s",
	    zfsvfs->z_flags & ZSB_XATTR ? "xattr" : "noxattr");

#ifdef CONFIG_FS_POSIX_ACL
	switch (zfsvfs->z_acl_type) {
	case ZFS_ACLTYPE_POSIX:
		seq_puts(seq, ",posixacl");
		break;
	default:
		seq_puts(seq, ",noacl");
		break;
	}
#endif /* CONFIG_FS_POSIX_ACL */

	return (0);
}

static int
zpl_show_options(struct seq_file *seq, struct dentry *root)
{
	return (__zpl_show_options(seq, root->d_sb->s_fs_info));
}

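/*
 * Populate a newly allocated superblock by mounting the requested
 * dataset via zfs_domount().
 */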
static int
zpl_fill_super(struct super_block *sb, void *data, int silent)
{
	zfs_mnt_t *zm = (zfs_mnt_t *)data;
	fstrans_cookie_t cookie;
	int error;

	cookie = spl_fstrans_mark();
	error = -zfs_domount(sb, zm, silent);
	spl_fstrans_unmark(cookie);
	ASSERT3S(error, <=, 0);

	return (error);
}

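/*
 * Used by sget() to determine whether an existing superblock already
 * refers to the objset being mounted.
 */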
static int
zpl_test_super(struct super_block *s, void *data)
{
	zfsvfs_t *zfsvfs = s->s_fs_info;
	objset_t *os = data;

	if (zfsvfs == NULL)
		return (0);

	return (os == zfsvfs->z_os);
}

static struct super_block *
zpl_mount_impl(struct file_system_type *fs_type, int flags, zfs_mnt_t *zm)
{
	struct super_block *s;
	objset_t *os;
	int err;

	err = dmu_objset_hold(zm->mnt_osname, FTAG, &os);
	if (err)
		return (ERR_PTR(-err));

	/*
	 * The dsl pool lock must be released prior to calling sget().
	 * It is possible sget() may block on the lock in grab_super()
	 * while deactivate_super() holds that same lock and waits for
	 * a txg sync.  If the dsl_pool lock is held over sget()
	 * this can prevent the pool sync and cause a deadlock.
	 */
	dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
	dsl_pool_rele(dmu_objset_pool(os), FTAG);

	s = sget(fs_type, zpl_test_super, set_anon_super, flags, os);

	dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
	dsl_dataset_rele(dmu_objset_ds(os), FTAG);

	if (IS_ERR(s))
		return (ERR_CAST(s));

	if (s->s_root == NULL) {
		err = zpl_fill_super(s, zm, flags & SB_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(s);
			return (ERR_PTR(err));
		}
		s->s_flags |= SB_ACTIVE;
	} else if ((flags ^ s->s_flags) & SB_RDONLY) {
		deactivate_locked_super(s);
		return (ERR_PTR(-EBUSY));
	}

	return (s);
}

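/*
 * The file_system_type ->mount() entry point; wraps zpl_mount_impl()
 * and returns the root dentry of the mounted superblock.
 */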
static struct dentry *
zpl_mount(struct file_system_type *fs_type, int flags,
    const char *osname, void *data)
{
	zfs_mnt_t zm = { .mnt_osname = osname, .mnt_data = data };

	struct super_block *sb = zpl_mount_impl(fs_type, flags, &zm);
	if (IS_ERR(sb))
		return (ERR_CAST(sb));

	return (dget(sb->s_root));
}

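/*
 * Tear down the superblock when the filesystem is unmounted.
 */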
static void
zpl_kill_sb(struct super_block *sb)
{
	zfs_preumount(sb);
	kill_anon_super(sb);
}

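/*
 * Prune the per-superblock dentry and inode caches, releasing up to
 * nr_to_scan objects when cached metadata must be reclaimed under
 * memory pressure.
 */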
void
zpl_prune_sb(int64_t nr_to_scan, void *arg)
{
	struct super_block *sb = (struct super_block *)arg;
	int objects = 0;

	(void) -zfs_prune(sb, nr_to_scan, &objects);
}

const struct super_operations zpl_super_operations = {
	.alloc_inode		= zpl_inode_alloc,
	.destroy_inode		= zpl_inode_destroy,
	.dirty_inode		= zpl_dirty_inode,
	.evict_inode		= zpl_evict_inode,
	.put_super		= zpl_put_super,
	.sync_fs		= zpl_sync_fs,
	.statfs			= zpl_statfs,
	.remount_fs		= zpl_remount_fs,
	.show_devname		= zpl_show_devname,
	.show_options		= zpl_show_options,
};

struct file_system_type zpl_fs_type = {
	.owner			= THIS_MODULE,
	.name			= ZFS_DRIVER,
	.mount			= zpl_mount,
	.kill_sb		= zpl_kill_sb,
};