/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/vnode.h>
#include <sys/sa.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_sa.h>
#include <sys/dmu_objset.h>
#include <sys/sa_impl.h>

/*
 * ZPL attribute registration table.
 * Order of attributes doesn't matter; a unique value will be assigned
 * for each attribute that is file system specific.
 *
 * This is just the set of ZPL attributes that this
 * version of ZFS deals with natively.  The file system
 * could have other attributes stored in files, but they will be
 * ignored.  The SA framework will preserve them, just that
 * this version of ZFS won't change or delete them.
 */

sa_attr_reg_t zfs_attr_table[ZPL_END+1] = {
	{"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
	{"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
	{"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
	{"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
	{"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
	{"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
	{"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
	{"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
	{"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
	{"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
	{"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
	{"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
	{"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
	{"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
	{"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
	{"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
	{"ZPL_DACL_COUNT", sizeof (uint64_t), SA_UINT64_ARRAY, 0},
	{"ZPL_SYMLINK", 0, SA_UINT8_ARRAY, 0},
	{"ZPL_SCANSTAMP", 32, SA_UINT8_ARRAY, 0},
	{"ZPL_DACL_ACES", 0, SA_ACL, 0},
	{"ZPL_DXATTR", 0, SA_UINT8_ARRAY, 0},
	{"ZPL_PROJID", sizeof (uint64_t), SA_UINT64_ARRAY, 0},
	{NULL, 0, 0, 0}
};

#ifdef _KERNEL
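
/*
 * Read a symlink target into "uio".  Short targets live in the bonus
 * buffer immediately after the legacy znode_phys area; longer targets
 * are read from the object's first data block instead.
 */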
int
zfs_sa_readlink(znode_t *zp, zfs_uio_t *uio)
{
	dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);
	size_t bufsz;
	int error;

	bufsz = zp->z_size;
	if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) {
		error = zfs_uiomove((caddr_t)db->db_data +
		    ZFS_OLD_ZNODE_PHYS_SIZE,
		    MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ, uio);
	} else {
		dmu_buf_t *dbp;
		if ((error = dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id,
		    0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) {
			error = zfs_uiomove(dbp->db_data,
			    MIN((size_t)bufsz, zfs_uio_resid(uio)), UIO_READ,
			    uio);
			dmu_buf_rele(dbp, FTAG);
		}
	}
	return (error);
}
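
/*
 * Store a symlink target of "len" bytes.  If it fits in the bonus buffer
 * alongside the legacy znode_phys area it is written there; otherwise the
 * object's block size is grown and the target is written to block 0.
 */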
void
zfs_sa_symlink(znode_t *zp, char *link, int len, dmu_tx_t *tx)
{
	dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);

	if (ZFS_OLD_ZNODE_PHYS_SIZE + len <= dmu_bonus_max()) {
		VERIFY0(dmu_set_bonus(db, len + ZFS_OLD_ZNODE_PHYS_SIZE, tx));
		if (len) {
			bcopy(link, (caddr_t)db->db_data +
			    ZFS_OLD_ZNODE_PHYS_SIZE, len);
		}
	} else {
		dmu_buf_t *dbp;

		zfs_grow_blocksize(zp, len, tx);
		VERIFY0(dmu_buf_hold(ZTOZSB(zp)->z_os, zp->z_id, 0, FTAG, &dbp,
		    DMU_READ_NO_PREFETCH));

		dmu_buf_will_dirty(dbp, tx);

		ASSERT3U(len, <=, dbp->db_size);
		bcopy(link, dbp->db_data, len);
		dmu_buf_rele(dbp, FTAG);
	}
}
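
/*
 * Retrieve the anti-virus scanstamp into the xvattr, either from the
 * ZPL_SCANSTAMP SA attribute (new-format znodes) or from the space past
 * the legacy znode_phys area in the bonus buffer (old-format znodes).
 */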
void
zfs_sa_get_scanstamp(znode_t *zp, xvattr_t *xvap)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	xoptattr_t *xoap;

	ASSERT(MUTEX_HELD(&zp->z_lock));
	VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
	if (zp->z_is_sa) {
		if (sa_lookup(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
		    &xoap->xoa_av_scanstamp,
		    sizeof (xoap->xoa_av_scanstamp)) != 0)
			return;
	} else {
		dmu_object_info_t doi;
		dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);
		int len;

		if (!(zp->z_pflags & ZFS_BONUS_SCANSTAMP))
			return;

		sa_object_info(zp->z_sa_hdl, &doi);
		len = sizeof (xoap->xoa_av_scanstamp) +
		    ZFS_OLD_ZNODE_PHYS_SIZE;

		if (len <= doi.doi_bonus_size) {
			(void) memcpy(xoap->xoa_av_scanstamp,
			    (caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
			    sizeof (xoap->xoa_av_scanstamp));
		}
	}
	XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
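
/*
 * Store the anti-virus scanstamp from the xvattr, either as the
 * ZPL_SCANSTAMP SA attribute (new-format znodes) or past the legacy
 * znode_phys area in the bonus buffer, in which case ZFS_BONUS_SCANSTAMP
 * is set in the znode flags (old-format znodes).
 */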
void
zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	xoptattr_t *xoap;

	ASSERT(MUTEX_HELD(&zp->z_lock));
	VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
	if (zp->z_is_sa)
		VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
		    &xoap->xoa_av_scanstamp,
		    sizeof (xoap->xoa_av_scanstamp), tx));
	else {
		dmu_object_info_t doi;
		dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);
		int len;

		sa_object_info(zp->z_sa_hdl, &doi);
		len = sizeof (xoap->xoa_av_scanstamp) +
		    ZFS_OLD_ZNODE_PHYS_SIZE;
		if (len > doi.doi_bonus_size)
			VERIFY(dmu_set_bonus(db, len, tx) == 0);
		(void) memcpy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
		    xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));

		zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
		VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
		    &zp->z_pflags, sizeof (uint64_t), tx));
	}
}
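
/*
 * Populate zp->z_xattr_cached from the packed ZPL_DXATTR SA attribute,
 * or with a fresh empty nvlist if the attribute does not exist yet.
 */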
int
zfs_sa_get_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	char *obj;
	int size;
	int error;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
	ASSERT(!zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), &size);
	if (error) {
		if (error == ENOENT)
			return nvlist_alloc(&zp->z_xattr_cached,
			    NV_UNIQUE_NAME, KM_SLEEP);
		else
			return (error);
	}

	obj = vmem_alloc(size, KM_SLEEP);

	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs), obj, size);
	if (error == 0)
		error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);

	vmem_free(obj, size);

	return (error);
}
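
/*
 * Pack zp->z_xattr_cached (XDR encoding) and write it back to the
 * ZPL_DXATTR SA attribute in its own transaction, updating ctime as well.
 * Fails with EFBIG if the packed nvlist exceeds SA_ATTR_MAX_LEN.
 */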
int
zfs_sa_set_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	char *obj;
	size_t size;
	int error;

	ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
	ASSERT(zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = nvlist_size(zp->z_xattr_cached, &size, NV_ENCODE_XDR);
	if ((error == 0) && (size > SA_ATTR_MAX_LEN))
		error = SET_ERROR(EFBIG);
	if (error)
		goto out;

	obj = vmem_alloc(size, KM_SLEEP);

	error = nvlist_pack(zp->z_xattr_cached, &obj, &size,
	    NV_ENCODE_XDR, KM_SLEEP);
	if (error)
		goto out_free;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, size);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		int count = 0;
		sa_bulk_attr_t bulk[2];
		uint64_t ctime[2];

		zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DXATTR(zfsvfs),
		    NULL, obj, size);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
		    NULL, &ctime, 16);
		VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));

		dmu_tx_commit(tx);
	}
out_free:
	vmem_free(obj, size);
out:
	return (error);
}

/*
 * I'm not convinced we should do any of this upgrade,
 * since the SA code can read both old/new znode formats
 * with probably little to no performance difference.
 *
 * All new files will be created with the new format.
 */
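
/*
 * Convert an old-format znode (fixed znode_phys bonus layout) to the
 * SA-based layout in place: the existing attributes are read in bulk,
 * rewritten as an SA template, the bonus type is switched to DMU_OT_SA,
 * and any external ACL object is freed.
 */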
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
	dmu_buf_t *db = sa_get_db(hdl);
	znode_t *zp = sa_get_userdata(hdl);
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int count = 0;
	sa_bulk_attr_t *bulk, *sa_attrs;
	zfs_acl_locator_cb_t locate = { 0 };
	uint64_t uid, gid, mode, rdev, xattr, parent, tmp_gen;
	uint64_t crtime[2], mtime[2], ctime[2], atime[2];
	uint64_t links;
	zfs_acl_phys_t znode_acl;
	char scanstamp[AV_SCANSTAMP_SZ];
	boolean_t drop_lock = B_FALSE;

	/*
	 * No upgrade if the ACL isn't cached, since we won't know which
	 * locks are held and reading the ACL would require special
	 * "locked" interfaces that would be messy.
	 */
	if (zp->z_acl_cached == NULL || Z_ISLNK(ZTOTYPE(zp)))
		return;

	/*
	 * If the z_lock is held and we aren't the owner, just return,
	 * since we don't want to deadlock trying to update the status
	 * of z_is_sa.  This file can then be upgraded at a later time.
	 *
	 * Otherwise, we know we are doing the sa_update() that caused
	 * us to enter this function.
	 */
	if (MUTEX_NOT_HELD(&zp->z_lock)) {
		if (mutex_tryenter(&zp->z_lock) == 0)
			return;
		else
			drop_lock = B_TRUE;
	}

	/* First do a bulk query of the attributes that aren't cached */
	bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
	    &znode_acl, 88);

	if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
		goto done;

	if (dmu_objset_projectquota_enabled(hdl->sa_os) &&
	    !(zp->z_pflags & ZFS_PROJID)) {
		zp->z_pflags |= ZFS_PROJID;
		zp->z_projid = ZFS_DEFAULT_PROJID;
	}

	/*
	 * While the order here doesn't matter, it's best to try and
	 * organize it in such a way as to pick up an already existing
	 * layout number.
	 */
	count = 0;
	sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs),
	    NULL, &tmp_gen, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs),
	    NULL, &parent, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &atime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
	    &crtime, 16);
	links = ZTONLNK(zp);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &links, 8);
	if (dmu_objset_projectquota_enabled(hdl->sa_os))
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PROJID(zfsvfs), NULL,
		    &zp->z_projid, 8);
	if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
		    &rdev, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
	    &zp->z_acl_cached->z_acl_count, 8);

	if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
		zfs_acl_xform(zp, zp->z_acl_cached, CRED());

	locate.cb_aclp = zp->z_acl_cached;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
	    zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);

	if (xattr)
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
		    NULL, &xattr, 8);

	/* if scanstamp then add scanstamp */

	if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
		bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
		    scanstamp, AV_SCANSTAMP_SZ);
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
		    NULL, scanstamp, AV_SCANSTAMP_SZ);
		zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
	}

	VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
	VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
	    count, tx) == 0);
	if (znode_acl.z_acl_extern_obj)
		VERIFY(0 == dmu_object_free(zfsvfs->z_os,
		    znode_acl.z_acl_extern_obj, tx));

	zp->z_is_sa = B_TRUE;
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
done:
	kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
	if (drop_lock)
		mutex_exit(&zp->z_lock);
}
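
/*
 * Add the transaction holds needed for a possible SA upgrade of "zp":
 * the SA/bonus area itself and, if present, the external ACL object
 * that the upgrade would free.
 */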
void
zfs_sa_upgrade_txholds(dmu_tx_t *tx, znode_t *zp)
{
	if (!ZTOZSB(zp)->z_use_sa || zp->z_is_sa)
		return;

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	if (zfs_external_acl(zp)) {
		dmu_tx_hold_free(tx, zfs_external_acl(zp), 0,
		    DMU_OBJECT_END);
	}
}

EXPORT_SYMBOL(zfs_attr_table);
EXPORT_SYMBOL(zfs_sa_readlink);
EXPORT_SYMBOL(zfs_sa_symlink);
EXPORT_SYMBOL(zfs_sa_get_scanstamp);
EXPORT_SYMBOL(zfs_sa_set_scanstamp);
EXPORT_SYMBOL(zfs_sa_get_xattr);
EXPORT_SYMBOL(zfs_sa_set_xattr);
EXPORT_SYMBOL(zfs_sa_upgrade);
EXPORT_SYMBOL(zfs_sa_upgrade_txholds);

#endif