1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
26 * Copyright 2023 RackTop Systems, Inc.
29 #include <sys/zfs_context.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/sysmacros.h>
33 #include <sys/dmu.h>
34 #include <sys/dmu_impl.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dmu_tx.h>
37 #include <sys/dbuf.h>
38 #include <sys/dnode.h>
39 #include <sys/zap.h>
40 #include <sys/sa.h>
41 #include <sys/sunddi.h>
42 #include <sys/sa_impl.h>
43 #include <sys/errno.h>
44 #include <sys/zfs_context.h>
46 #ifdef _KERNEL
47 #include <sys/zfs_znode.h>
48 #endif
51 * ZFS System attributes:
53 * A generic mechanism to allow for arbitrary attributes
54 * to be stored in a dnode. The data will be stored in the bonus buffer of
55 * the dnode and if necessary a special "spill" block will be used to handle
56 * overflow situations. The spill block will be sized to fit the data
57 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
58 * spill block is stored at the end of the current bonus buffer. Any
59 * attributes that would be in the way of the blkptr_t will be relocated
60 * into the spill block.
62 * Attribute registration:
64  * Stored persistently on a per-dataset basis is a mapping between
65  * attribute "string" names and their actual attribute numeric values,
66  * lengths, and byteswap functions. The names are only used
67 * during registration. All attributes are known by their unique attribute
68 * id value. If an attribute can have a variable size then the value
69 * 0 will be used to indicate this.
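 *
 * For illustration only (these attribute names are hypothetical and not
 * registered by this file), a consumer typically describes its attributes
 * in an sa_attr_reg_t table and hands it to sa_setup(); a length of 0
 * marks a variable-sized attribute as described above:
 *
 *	static const sa_attr_reg_t my_attrs[] = {
 *		{ "MYFS_MODE",  sizeof (uint64_t), SA_UINT64_ARRAY, 0 },
 *		{ "MYFS_SIZE",  sizeof (uint64_t), SA_UINT64_ARRAY, 1 },
 *		{ "MYFS_XATTR", 0, SA_UINT8_ARRAY, 2 },
 *	};
 *	sa_attr_type_t *attr_table;
 *	int error = sa_setup(os, sa_obj, my_attrs, ARRAY_SIZE(my_attrs),
 *	    &attr_table);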
71 * Attribute Layout:
73 * Attribute layouts are a way to compactly store multiple attributes, but
74 * without taking the overhead associated with managing each attribute
75 * individually. Since you will typically have the same set of attributes
76 * stored in the same order a single table will be used to represent that
77 * layout. The ZPL for example will usually have only about 10 different
78 * layouts (regular files, device files, symlinks,
79 * regular files + scanstamp, files/dir with extended attributes, and then
80 * you have the possibility of all of those minus ACL, because it would
81 * be kicked out into the spill block)
83 * Layouts are simply an array of the attributes and their
84 * ordering i.e. [0, 1, 4, 5, 2]
86 * Each distinct layout is given a unique layout number and that is what's
87 * stored in the header at the beginning of the SA data buffer.
89 * A layout only covers a single dbuf (bonus or spill). If a set of
90 * attributes is split up between the bonus buffer and a spill buffer then
91 * two different layouts will be used. This allows us to byteswap the
92 * spill without looking at the bonus buffer and keeps the on disk format of
93 * the bonus and spill buffer the same.
95 * Adding a single attribute will cause the entire set of attributes to
96 * be rewritten and could result in a new layout number being constructed
97 * as part of the rewrite if no such layout exists for the new set of
98 * attributes. The new attribute will be appended to the end of the already
99 * existing attributes.
101 * Both the attribute registration and attribute layout information are
102  * stored in normal ZAP attributes. There should be a small number of
103 * known layouts and the set of attributes is assumed to typically be quite
104 * small.
106 * The registered attributes and layout "table" information is maintained
107 * in core and a special "sa_os_t" is attached to the objset_t.
109 * A special interface is provided to allow for quickly applying
110 * a large set of attributes at once. sa_replace_all_by_template() is
111 * used to set an array of attributes. This is used by the ZPL when
112 * creating a brand new file. The template that is passed into the function
113 * specifies the attribute, size for variable length attributes, location of
114 * data and special "data locator" function if the data isn't in a contiguous
115 * location.
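 *
 * A rough caller-side sketch (hypothetical names, error handling omitted),
 * modeled on how the ZPL builds such a template: fill a bulk array with
 * SA_ADD_BULK_ATTR() and apply it in one call.
 *
 *	sa_bulk_attr_t attrs[2];
 *	int count = 0;
 *	uint64_t mode, size;
 *
 *	SA_ADD_BULK_ATTR(attrs, count, attr_table[MYFS_MODE], NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(attrs, count, attr_table[MYFS_SIZE], NULL, &size, 8);
 *	error = sa_replace_all_by_template(hdl, attrs, count, tx);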
117 * Byteswap implications:
119 * Since the SA attributes are not entirely self describing we can't do
120 * the normal byteswap processing. The special ZAP layout attribute and
121 * attribute registration attributes define the byteswap function and the
122 * size of the attributes, unless it is variable sized.
123 * The normal ZFS byteswapping infrastructure assumes you don't need
124  * to read any objects in order to do the necessary byteswapping, whereas
125 * SA attributes can only be properly byteswapped if the dataset is opened
126 * and the layout/attribute ZAP attributes are available. Because of this
127 * the SA attributes will be byteswapped when they are first accessed by
128 * the SA code that will read the SA data.
131 typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
132 uint16_t length, int length_idx, boolean_t, void *userp);
134 static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
135 static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
136 static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
137 sa_hdr_phys_t *hdr);
138 static void sa_idx_tab_rele(objset_t *os, void *arg);
139 static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
140 int buflen);
141 static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
142 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
143 uint16_t buflen, dmu_tx_t *tx);
145 static arc_byteswap_func_t sa_bswap_table[] = {
146 byteswap_uint64_array,
147 byteswap_uint32_array,
148 byteswap_uint16_array,
149 byteswap_uint8_array,
150 zfs_acl_byteswap,
153 #ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
154 #define SA_COPY_DATA(f, s, t, l) \
155 do { \
156 if (f == NULL) { \
157 if (l == 8) { \
158 *(uint64_t *)t = *(uint64_t *)s; \
159 } else if (l == 16) { \
160 *(uint64_t *)t = *(uint64_t *)s; \
161 *(uint64_t *)((uintptr_t)t + 8) = \
162 *(uint64_t *)((uintptr_t)s + 8); \
163 } else { \
164 memcpy(t, s, l); \
166 } else { \
167 sa_copy_data(f, s, t, l); \
169 } while (0)
170 #else
171 #define SA_COPY_DATA(f, s, t, l) sa_copy_data(f, s, t, l)
172 #endif
175 * This table is fixed and cannot be changed. Its purpose is to
176 * allow the SA code to work with both old/new ZPL file systems.
177 * It contains the list of legacy attributes. These attributes aren't
178 * stored in the "attribute" registry zap objects, since older ZPL file systems
179 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
180 * use this static table.
182 static const sa_attr_reg_t sa_legacy_attrs[] = {
183 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
184 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
185 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
186 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
187 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
188 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
189 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
190 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
191 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
192 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
193 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
194 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
195 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
196 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
197 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
198 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
202 * This is only used for objects of type DMU_OT_ZNODE
204 static const sa_attr_type_t sa_legacy_zpl_layout[] = {
205 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
209 * Special dummy layout used for buffers with no attributes.
211 static const sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
213 static const size_t sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs);
214 static kmem_cache_t *sa_cache = NULL;
216 static int
217 sa_cache_constructor(void *buf, void *unused, int kmflag)
219 (void) unused, (void) kmflag;
220 sa_handle_t *hdl = buf;
222 mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
223 return (0);
226 static void
227 sa_cache_destructor(void *buf, void *unused)
229 (void) unused;
230 sa_handle_t *hdl = buf;
231 mutex_destroy(&hdl->sa_lock);
234 void
235 sa_cache_init(void)
237 sa_cache = kmem_cache_create("sa_cache",
238 sizeof (sa_handle_t), 0, sa_cache_constructor,
239 sa_cache_destructor, NULL, NULL, NULL, KMC_RECLAIMABLE);
242 void
243 sa_cache_fini(void)
245 if (sa_cache)
246 kmem_cache_destroy(sa_cache);
249 static int
250 layout_num_compare(const void *arg1, const void *arg2)
252 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
253 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
255 return (TREE_CMP(node1->lot_num, node2->lot_num));
258 static int
259 layout_hash_compare(const void *arg1, const void *arg2)
261 const sa_lot_t *node1 = (const sa_lot_t *)arg1;
262 const sa_lot_t *node2 = (const sa_lot_t *)arg2;
264 int cmp = TREE_CMP(node1->lot_hash, node2->lot_hash);
265 if (likely(cmp))
266 return (cmp);
268 return (TREE_CMP(node1->lot_instance, node2->lot_instance));
271 static boolean_t
272 sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
274 int i;
276 if (count != tbf->lot_attr_count)
277 return (1);
279 for (i = 0; i != count; i++) {
280 if (attrs[i] != tbf->lot_attrs[i])
281 return (1);
283 return (0);
286 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
288 static uint64_t
289 sa_layout_info_hash(const sa_attr_type_t *attrs, int attr_count)
291 uint64_t crc = -1ULL;
293 for (int i = 0; i != attr_count; i++)
294 crc ^= SA_ATTR_HASH(attrs[i]);
296 return (crc);
299 static int
300 sa_get_spill(sa_handle_t *hdl)
302 int rc;
303 if (hdl->sa_spill == NULL) {
304 if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
305 &hdl->sa_spill)) == 0)
306 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
307 } else {
308 rc = 0;
311 return (rc);
315  * Main attribute lookup/update function.
316  * Returns 0 for success or non-zero for failure.
318  * Operates on a bulk array; the first failure aborts further processing.
320 static int
321 sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
322 sa_data_op_t data_op, dmu_tx_t *tx)
324 sa_os_t *sa = hdl->sa_os->os_sa;
325 int i;
326 int error = 0;
327 sa_buf_type_t buftypes;
329 buftypes = 0;
331 ASSERT(count > 0);
332 for (i = 0; i != count; i++) {
333 ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
335 bulk[i].sa_addr = NULL;
336 /* First check the bonus buffer */
338 if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
339 hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
340 SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
341 SA_GET_HDR(hdl, SA_BONUS),
342 bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
343 if (tx && !(buftypes & SA_BONUS)) {
344 dmu_buf_will_dirty(hdl->sa_bonus, tx);
345 buftypes |= SA_BONUS;
348 if (bulk[i].sa_addr == NULL &&
349 ((error = sa_get_spill(hdl)) == 0)) {
350 if (TOC_ATTR_PRESENT(
351 hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
352 SA_ATTR_INFO(sa, hdl->sa_spill_tab,
353 SA_GET_HDR(hdl, SA_SPILL),
354 bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
355 if (tx && !(buftypes & SA_SPILL) &&
356 bulk[i].sa_size == bulk[i].sa_length) {
357 dmu_buf_will_dirty(hdl->sa_spill, tx);
358 buftypes |= SA_SPILL;
362 if (error && error != ENOENT) {
363 return ((error == ECKSUM) ? EIO : error);
366 switch (data_op) {
367 case SA_LOOKUP:
368 if (bulk[i].sa_addr == NULL)
369 return (SET_ERROR(ENOENT));
370 if (bulk[i].sa_data) {
371 SA_COPY_DATA(bulk[i].sa_data_func,
372 bulk[i].sa_addr, bulk[i].sa_data,
373 MIN(bulk[i].sa_size, bulk[i].sa_length));
375 continue;
377 case SA_UPDATE:
378 /* existing rewrite of attr */
379 if (bulk[i].sa_addr &&
380 bulk[i].sa_size == bulk[i].sa_length) {
381 SA_COPY_DATA(bulk[i].sa_data_func,
382 bulk[i].sa_data, bulk[i].sa_addr,
383 bulk[i].sa_length);
384 continue;
385 } else if (bulk[i].sa_addr) { /* attr size change */
386 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
387 SA_REPLACE, bulk[i].sa_data_func,
388 bulk[i].sa_data, bulk[i].sa_length, tx);
389 } else { /* adding new attribute */
390 error = sa_modify_attrs(hdl, bulk[i].sa_attr,
391 SA_ADD, bulk[i].sa_data_func,
392 bulk[i].sa_data, bulk[i].sa_length, tx);
394 if (error)
395 return (error);
396 break;
397 default:
398 break;
401 return (error);
404 static sa_lot_t *
405 sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
406 uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
408 sa_os_t *sa = os->os_sa;
409 sa_lot_t *tb, *findtb;
410 int i;
411 avl_index_t loc;
413 ASSERT(MUTEX_HELD(&sa->sa_lock));
414 tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
415 tb->lot_attr_count = attr_count;
416 tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
417 KM_SLEEP);
418 memcpy(tb->lot_attrs, attrs, sizeof (sa_attr_type_t) * attr_count);
419 tb->lot_num = lot_num;
420 tb->lot_hash = hash;
421 tb->lot_instance = 0;
423 if (zapadd) {
424 char attr_name[8];
426 if (sa->sa_layout_attr_obj == 0) {
427 sa->sa_layout_attr_obj = zap_create_link(os,
428 DMU_OT_SA_ATTR_LAYOUTS,
429 sa->sa_master_obj, SA_LAYOUTS, tx);
432 (void) snprintf(attr_name, sizeof (attr_name),
433 "%d", (int)lot_num);
434 VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
435 attr_name, 2, attr_count, attrs, tx));
438 list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
439 offsetof(sa_idx_tab_t, sa_next));
441 for (i = 0; i != attr_count; i++) {
442 if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
443 tb->lot_var_sizes++;
446 avl_add(&sa->sa_layout_num_tree, tb);
448 /* verify we don't have a hash collision */
449 if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
450 for (; findtb && findtb->lot_hash == hash;
451 findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
452 if (findtb->lot_instance != tb->lot_instance)
453 break;
454 tb->lot_instance++;
457 avl_add(&sa->sa_layout_hash_tree, tb);
458 return (tb);
461 static void
462 sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
463 int count, dmu_tx_t *tx, sa_lot_t **lot)
465 sa_lot_t *tb, tbsearch;
466 avl_index_t loc;
467 sa_os_t *sa = os->os_sa;
468 boolean_t found = B_FALSE;
470 mutex_enter(&sa->sa_lock);
471 tbsearch.lot_hash = hash;
472 tbsearch.lot_instance = 0;
473 tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
474 if (tb) {
475 for (; tb && tb->lot_hash == hash;
476 tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
477 if (sa_layout_equal(tb, attrs, count) == 0) {
478 found = B_TRUE;
479 break;
483 if (!found) {
484 tb = sa_add_layout_entry(os, attrs, count,
485 avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
487 mutex_exit(&sa->sa_lock);
488 *lot = tb;
491 static int
492 sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
494 int error;
495 uint32_t blocksize;
497 if (size == 0) {
498 blocksize = SPA_MINBLOCKSIZE;
499 } else if (size > SPA_OLD_MAXBLOCKSIZE) {
500 ASSERT(0);
501 return (SET_ERROR(EFBIG));
502 } else {
503 blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
506 error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
507 ASSERT(error == 0);
508 return (error);
511 static void
512 sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
514 if (func == NULL) {
515 memcpy(target, datastart, buflen);
516 } else {
517 boolean_t start;
518 int bytes;
519 void *dataptr;
520 void *saptr = target;
521 uint32_t length;
523 start = B_TRUE;
524 bytes = 0;
525 while (bytes < buflen) {
526 func(&dataptr, &length, buflen, start, datastart);
527 memcpy(saptr, dataptr, length);
528 saptr = (void *)((caddr_t)saptr + length);
529 bytes += length;
530 start = B_FALSE;
536 * Determine several different values pertaining to system attribute
537 * buffers.
539 * Return the size of the sa_hdr_phys_t header for the buffer. Each
540 * variable length attribute except the first contributes two bytes to
541 * the header size, which is then rounded up to an 8-byte boundary.
543 * The following output parameters are also computed.
545 * index - The index of the first attribute in attr_desc that will
546 * spill over. Only valid if will_spill is set.
548 * total - The total number of bytes of all system attributes described
549 * in attr_desc.
551 * will_spill - Set when spilling is necessary. It is only set when
552 * the buftype is SA_BONUS.
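 *
 * As a worked example of the header-size rule above: with the standard
 * 8-byte sa_hdr_phys_t and a layout containing three variable-length
 * attributes, the second and third each add sizeof (uint16_t), giving
 * 8 + 2 + 2 = 12 bytes, which is then rounded up to 16.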
554 static int
555 sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
556 dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
557 int *total, boolean_t *will_spill)
559 int var_size_count = 0;
560 int i;
561 int hdrsize;
562 int extra_hdrsize;
564 if (buftype == SA_BONUS && sa->sa_force_spill) {
565 *total = 0;
566 *index = 0;
567 *will_spill = B_TRUE;
568 return (0);
571 *index = -1;
572 *total = 0;
573 *will_spill = B_FALSE;
575 extra_hdrsize = 0;
576 hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
577 sizeof (sa_hdr_phys_t);
579 ASSERT(IS_P2ALIGNED(full_space, 8));
581 for (i = 0; i != attr_count; i++) {
582 boolean_t is_var_sz, might_spill_here;
583 int tmp_hdrsize;
585 *total = P2ROUNDUP(*total, 8);
586 *total += attr_desc[i].sa_length;
587 if (*will_spill)
588 continue;
590 is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
591 if (is_var_sz)
592 var_size_count++;
595 * Calculate what the SA header size would be if this
596 * attribute doesn't spill.
598 tmp_hdrsize = hdrsize + ((is_var_sz && var_size_count > 1) ?
599 sizeof (uint16_t) : 0);
602 * Check whether this attribute spans into the space
603 * that would be used by the spill block pointer should
604 * a spill block be needed.
606 might_spill_here =
607 buftype == SA_BONUS && *index == -1 &&
608 (*total + P2ROUNDUP(tmp_hdrsize, 8)) >
609 (full_space - sizeof (blkptr_t));
611 if (is_var_sz && var_size_count > 1) {
612 if (buftype == SA_SPILL ||
613 tmp_hdrsize + *total < full_space) {
615 * Record the extra header size in case this
616 * increase needs to be reversed due to
617 * spill-over.
619 hdrsize = tmp_hdrsize;
620 if (*index != -1 || might_spill_here)
621 extra_hdrsize += sizeof (uint16_t);
622 } else {
623 ASSERT(buftype == SA_BONUS);
624 if (*index == -1)
625 *index = i;
626 *will_spill = B_TRUE;
627 continue;
632 * Store index of where spill *could* occur. Then
633 * continue to count the remaining attribute sizes. The
634 * sum is used later for sizing bonus and spill buffer.
636 if (might_spill_here)
637 *index = i;
639 if ((*total + P2ROUNDUP(hdrsize, 8)) > full_space &&
640 buftype == SA_BONUS)
641 *will_spill = B_TRUE;
644 if (*will_spill)
645 hdrsize -= extra_hdrsize;
647 hdrsize = P2ROUNDUP(hdrsize, 8);
648 return (hdrsize);
651 #define BUF_SPACE_NEEDED(total, header) (total + header)
654  * Find the layout that corresponds to the ordering of attributes.
655  * If none is found, a new layout number is created and added to the
656  * persistent layout tables.
658 static int
659 sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
660 dmu_tx_t *tx)
662 sa_os_t *sa = hdl->sa_os->os_sa;
663 uint64_t hash;
664 sa_buf_type_t buftype;
665 sa_hdr_phys_t *sahdr;
666 void *data_start;
667 sa_attr_type_t *attrs, *attrs_start;
668 int i, lot_count;
669 int dnodesize;
670 int spill_idx;
671 int hdrsize;
672 int spillhdrsize = 0;
673 int used;
674 dmu_object_type_t bonustype;
675 sa_lot_t *lot;
676 int len_idx;
677 int spill_used;
678 int bonuslen;
679 boolean_t spilling;
681 dmu_buf_will_dirty(hdl->sa_bonus, tx);
682 bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
683 dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
684 bonuslen = DN_BONUS_SIZE(dnodesize);
686 /* first determine bonus header size and sum of all attributes */
687 hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
688 SA_BONUS, bonuslen, &spill_idx, &used, &spilling);
690 if (used > SPA_OLD_MAXBLOCKSIZE)
691 return (SET_ERROR(EFBIG));
693 VERIFY0(dmu_set_bonus(hdl->sa_bonus, spilling ?
694 MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
695 used + hdrsize, tx));
697 ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
698 bonustype == DMU_OT_SA);
700 /* setup and size spill buffer when needed */
701 if (spilling) {
702 boolean_t dummy;
704 if (hdl->sa_spill == NULL) {
705 VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
706 &hdl->sa_spill) == 0);
708 dmu_buf_will_dirty(hdl->sa_spill, tx);
710 spillhdrsize = sa_find_sizes(sa, &attr_desc[spill_idx],
711 attr_count - spill_idx, hdl->sa_spill, SA_SPILL,
712 hdl->sa_spill->db_size, &i, &spill_used, &dummy);
714 if (spill_used > SPA_OLD_MAXBLOCKSIZE)
715 return (SET_ERROR(EFBIG));
717 if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
718 hdl->sa_spill->db_size)
719 VERIFY(0 == sa_resize_spill(hdl,
720 BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
723 /* setup starting pointers to lay down data */
724 data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
725 sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
726 buftype = SA_BONUS;
728 attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
729 KM_SLEEP);
730 lot_count = 0;
732 for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
733 uint16_t length;
735 ASSERT(IS_P2ALIGNED(data_start, 8));
736 attrs[i] = attr_desc[i].sa_attr;
737 length = SA_REGISTERED_LEN(sa, attrs[i]);
738 if (length == 0)
739 length = attr_desc[i].sa_length;
741 if (spilling && i == spill_idx) { /* switch to spill buffer */
742 VERIFY(bonustype == DMU_OT_SA);
743 if (buftype == SA_BONUS && !sa->sa_force_spill) {
744 sa_find_layout(hdl->sa_os, hash, attrs_start,
745 lot_count, tx, &lot);
746 SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
749 buftype = SA_SPILL;
750 hash = -1ULL;
751 len_idx = 0;
753 sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
754 sahdr->sa_magic = SA_MAGIC;
755 data_start = (void *)((uintptr_t)sahdr +
756 spillhdrsize);
757 attrs_start = &attrs[i];
758 lot_count = 0;
760 hash ^= SA_ATTR_HASH(attrs[i]);
761 attr_desc[i].sa_addr = data_start;
762 attr_desc[i].sa_size = length;
763 SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
764 data_start, length);
765 if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
766 sahdr->sa_lengths[len_idx++] = length;
768 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
769 length), 8);
770 lot_count++;
773 sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
776 * Verify that old znodes always have layout number 0.
777 * Must be DMU_OT_SA for arbitrary layouts
779 VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
780 (bonustype == DMU_OT_SA && lot->lot_num > 1));
782 if (bonustype == DMU_OT_SA) {
783 SA_SET_HDR(sahdr, lot->lot_num,
784 buftype == SA_BONUS ? hdrsize : spillhdrsize);
787 kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
788 if (hdl->sa_bonus_tab) {
789 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
790 hdl->sa_bonus_tab = NULL;
792 if (!sa->sa_force_spill)
793 VERIFY(0 == sa_build_index(hdl, SA_BONUS));
794 if (hdl->sa_spill) {
795 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
796 if (!spilling) {
798 * remove spill block that is no longer needed.
800 dmu_buf_rele(hdl->sa_spill, NULL);
801 hdl->sa_spill = NULL;
802 hdl->sa_spill_tab = NULL;
803 VERIFY(0 == dmu_rm_spill(hdl->sa_os,
804 sa_handle_object(hdl), tx));
805 } else {
806 VERIFY(0 == sa_build_index(hdl, SA_SPILL));
810 return (0);
813 static void
814 sa_free_attr_table(sa_os_t *sa)
816 int i;
818 if (sa->sa_attr_table == NULL)
819 return;
821 for (i = 0; i != sa->sa_num_attrs; i++) {
822 if (sa->sa_attr_table[i].sa_name)
823 kmem_free(sa->sa_attr_table[i].sa_name,
824 strlen(sa->sa_attr_table[i].sa_name) + 1);
827 kmem_free(sa->sa_attr_table,
828 sizeof (sa_attr_table_t) * sa->sa_num_attrs);
830 sa->sa_attr_table = NULL;
833 static int
834 sa_attr_table_setup(objset_t *os, const sa_attr_reg_t *reg_attrs, int count)
836 sa_os_t *sa = os->os_sa;
837 uint64_t sa_attr_count = 0;
838 uint64_t sa_reg_count = 0;
839 int error = 0;
840 uint64_t attr_value;
841 sa_attr_table_t *tb;
842 zap_cursor_t zc;
843 zap_attribute_t *za;
844 int registered_count = 0;
845 int i;
846 dmu_objset_type_t ostype = dmu_objset_type(os);
848 sa->sa_user_table =
849 kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
850 sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
852 if (sa->sa_reg_attr_obj != 0) {
853 error = zap_count(os, sa->sa_reg_attr_obj,
854 &sa_attr_count);
857 * Make sure we retrieved a count and that it isn't zero
859 if (error || (error == 0 && sa_attr_count == 0)) {
860 if (error == 0)
861 error = SET_ERROR(EINVAL);
862 goto bail;
864 sa_reg_count = sa_attr_count;
867 if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
868 sa_attr_count += sa_legacy_attr_count;
870 /* Allocate attribute numbers for attributes that aren't registered */
871 for (i = 0; i != count; i++) {
872 boolean_t found = B_FALSE;
873 int j;
875 if (ostype == DMU_OST_ZFS) {
876 for (j = 0; j != sa_legacy_attr_count; j++) {
877 if (strcmp(reg_attrs[i].sa_name,
878 sa_legacy_attrs[j].sa_name) == 0) {
879 sa->sa_user_table[i] =
880 sa_legacy_attrs[j].sa_attr;
881 found = B_TRUE;
885 if (found)
886 continue;
888 if (sa->sa_reg_attr_obj)
889 error = zap_lookup(os, sa->sa_reg_attr_obj,
890 reg_attrs[i].sa_name, 8, 1, &attr_value);
891 else
892 error = SET_ERROR(ENOENT);
893 switch (error) {
894 case ENOENT:
895 sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
896 sa_attr_count++;
897 break;
898 case 0:
899 sa->sa_user_table[i] = ATTR_NUM(attr_value);
900 break;
901 default:
902 goto bail;
906 sa->sa_num_attrs = sa_attr_count;
907 tb = sa->sa_attr_table =
908 kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
911  * The attribute table is constructed from the requested attribute list,
912  * any previously registered foreign attributes, and the legacy
913  * ZPL set of attributes.
916 if (sa->sa_reg_attr_obj) {
917 za = zap_attribute_alloc();
918 for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
919 (error = zap_cursor_retrieve(&zc, za)) == 0;
920 zap_cursor_advance(&zc)) {
921 uint64_t value;
922 value = za->za_first_integer;
924 registered_count++;
925 tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
926 tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
927 tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
928 tb[ATTR_NUM(value)].sa_registered = B_TRUE;
930 if (tb[ATTR_NUM(value)].sa_name) {
931 continue;
933 tb[ATTR_NUM(value)].sa_name =
934 kmem_zalloc(strlen(za->za_name) +1, KM_SLEEP);
935 (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za->za_name,
936 strlen(za->za_name) +1);
938 zap_cursor_fini(&zc);
939 zap_attribute_free(za);
941 * Make sure we processed the correct number of registered
942 * attributes
944 if (registered_count != sa_reg_count) {
945 ASSERT(error != 0);
946 goto bail;
951 if (ostype == DMU_OST_ZFS) {
952 for (i = 0; i != sa_legacy_attr_count; i++) {
953 if (tb[i].sa_name)
954 continue;
955 tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
956 tb[i].sa_length = sa_legacy_attrs[i].sa_length;
957 tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
958 tb[i].sa_registered = B_FALSE;
959 tb[i].sa_name =
960 kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
961 KM_SLEEP);
962 (void) strlcpy(tb[i].sa_name,
963 sa_legacy_attrs[i].sa_name,
964 strlen(sa_legacy_attrs[i].sa_name) + 1);
968 for (i = 0; i != count; i++) {
969 sa_attr_type_t attr_id;
971 attr_id = sa->sa_user_table[i];
972 if (tb[attr_id].sa_name)
973 continue;
975 tb[attr_id].sa_length = reg_attrs[i].sa_length;
976 tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
977 tb[attr_id].sa_attr = attr_id;
978 tb[attr_id].sa_name =
979 kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
980 (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
981 strlen(reg_attrs[i].sa_name) + 1);
984 sa->sa_need_attr_registration =
985 (sa_attr_count != registered_count);
987 return (0);
988 bail:
989 kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
990 sa->sa_user_table = NULL;
991 sa_free_attr_table(sa);
992 ASSERT(error != 0);
993 return (error);
997 sa_setup(objset_t *os, uint64_t sa_obj, const sa_attr_reg_t *reg_attrs,
998 int count, sa_attr_type_t **user_table)
1000 zap_cursor_t zc;
1001 zap_attribute_t *za;
1002 sa_os_t *sa;
1003 dmu_objset_type_t ostype = dmu_objset_type(os);
1004 sa_attr_type_t *tb;
1005 int error;
1007 mutex_enter(&os->os_user_ptr_lock);
1008 if (os->os_sa) {
1009 mutex_enter(&os->os_sa->sa_lock);
1010 mutex_exit(&os->os_user_ptr_lock);
1011 tb = os->os_sa->sa_user_table;
1012 mutex_exit(&os->os_sa->sa_lock);
1013 *user_table = tb;
1014 return (0);
1017 sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
1018 mutex_init(&sa->sa_lock, NULL, MUTEX_NOLOCKDEP, NULL);
1019 sa->sa_master_obj = sa_obj;
1021 os->os_sa = sa;
1022 mutex_enter(&sa->sa_lock);
1023 mutex_exit(&os->os_user_ptr_lock);
1024 avl_create(&sa->sa_layout_num_tree, layout_num_compare,
1025 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
1026 avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
1027 sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
1029 if (sa_obj) {
1030 error = zap_lookup(os, sa_obj, SA_LAYOUTS,
1031 8, 1, &sa->sa_layout_attr_obj);
1032 if (error != 0 && error != ENOENT)
1033 goto fail;
1034 error = zap_lookup(os, sa_obj, SA_REGISTRY,
1035 8, 1, &sa->sa_reg_attr_obj);
1036 if (error != 0 && error != ENOENT)
1037 goto fail;
1040 if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
1041 goto fail;
1043 if (sa->sa_layout_attr_obj != 0) {
1044 uint64_t layout_count;
1046 error = zap_count(os, sa->sa_layout_attr_obj,
1047 &layout_count);
1050 * Layout number count should be > 0
1052 if (error || (error == 0 && layout_count == 0)) {
1053 if (error == 0)
1054 error = SET_ERROR(EINVAL);
1055 goto fail;
1058 za = zap_attribute_alloc();
1059 for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
1060 (error = zap_cursor_retrieve(&zc, za)) == 0;
1061 zap_cursor_advance(&zc)) {
1062 sa_attr_type_t *lot_attrs;
1063 uint64_t lot_num;
1065 lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
1066 za->za_num_integers, KM_SLEEP);
1068 if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
1069 za->za_name, 2, za->za_num_integers,
1070 lot_attrs))) != 0) {
1071 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1072 za->za_num_integers);
1073 break;
1075 VERIFY0(ddi_strtoull(za->za_name, NULL, 10,
1076 (unsigned long long *)&lot_num));
1078 (void) sa_add_layout_entry(os, lot_attrs,
1079 za->za_num_integers, lot_num,
1080 sa_layout_info_hash(lot_attrs,
1081 za->za_num_integers), B_FALSE, NULL);
1082 kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
1083 za->za_num_integers);
1085 zap_cursor_fini(&zc);
1086 zap_attribute_free(za);
1089 * Make sure layout count matches number of entries added
1090 * to AVL tree
1092 if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
1093 ASSERT(error != 0);
1094 goto fail;
1098 /* Add special layout number for old ZNODES */
1099 if (ostype == DMU_OST_ZFS) {
1100 (void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
1101 sa_legacy_attr_count, 0,
1102 sa_layout_info_hash(sa_legacy_zpl_layout,
1103 sa_legacy_attr_count), B_FALSE, NULL);
1105 (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
1106 0, B_FALSE, NULL);
1108 *user_table = os->os_sa->sa_user_table;
1109 mutex_exit(&sa->sa_lock);
1110 return (0);
1111 fail:
1112 os->os_sa = NULL;
1113 sa_free_attr_table(sa);
1114 if (sa->sa_user_table)
1115 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1116 mutex_exit(&sa->sa_lock);
1117 avl_destroy(&sa->sa_layout_hash_tree);
1118 avl_destroy(&sa->sa_layout_num_tree);
1119 mutex_destroy(&sa->sa_lock);
1120 kmem_free(sa, sizeof (sa_os_t));
1121 return ((error == ECKSUM) ? EIO : error);
1124 void
1125 sa_tear_down(objset_t *os)
1127 sa_os_t *sa = os->os_sa;
1128 sa_lot_t *layout;
1129 void *cookie;
1131 kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
1133 /* Free up attr table */
1135 sa_free_attr_table(sa);
1137 cookie = NULL;
1138 while ((layout =
1139 avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
1140 sa_idx_tab_t *tab;
1141 while ((tab = list_head(&layout->lot_idx_tab))) {
1142 ASSERT(zfs_refcount_count(&tab->sa_refcount));
1143 sa_idx_tab_rele(os, tab);
1147 cookie = NULL;
1148 while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))) {
1149 kmem_free(layout->lot_attrs,
1150 sizeof (sa_attr_type_t) * layout->lot_attr_count);
1151 kmem_free(layout, sizeof (sa_lot_t));
1154 avl_destroy(&sa->sa_layout_hash_tree);
1155 avl_destroy(&sa->sa_layout_num_tree);
1156 mutex_destroy(&sa->sa_lock);
1158 kmem_free(sa, sizeof (sa_os_t));
1159 os->os_sa = NULL;
1162 static void
1163 sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
1164 uint16_t length, int length_idx, boolean_t var_length, void *userp)
1166 sa_idx_tab_t *idx_tab = userp;
1168 if (var_length) {
1169 ASSERT(idx_tab->sa_variable_lengths);
1170 idx_tab->sa_variable_lengths[length_idx] = length;
1172 TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
1173 (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
1176 static void
1177 sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
1178 sa_iterfunc_t func, sa_lot_t *tab, void *userp)
1180 void *data_start;
1181 sa_lot_t *tb = tab;
1182 sa_lot_t search;
1183 avl_index_t loc;
1184 sa_os_t *sa = os->os_sa;
1185 int i;
1186 uint16_t *length_start = NULL;
1187 uint8_t length_idx = 0;
1189 if (tab == NULL) {
1190 search.lot_num = SA_LAYOUT_NUM(hdr, type);
1191 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1192 ASSERT(tb);
1195 if (IS_SA_BONUSTYPE(type)) {
1196 data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
1197 offsetof(sa_hdr_phys_t, sa_lengths) +
1198 (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
1199 length_start = hdr->sa_lengths;
1200 } else {
1201 data_start = hdr;
1204 for (i = 0; i != tb->lot_attr_count; i++) {
1205 int attr_length, reg_length;
1206 uint8_t idx_len;
1208 reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
1209 IMPLY(reg_length == 0, IS_SA_BONUSTYPE(type));
1210 if (reg_length) {
1211 attr_length = reg_length;
1212 idx_len = 0;
1213 } else {
1214 attr_length = length_start[length_idx];
1215 idx_len = length_idx++;
1218 func(hdr, data_start, tb->lot_attrs[i], attr_length,
1219 idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
1221 data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
1222 attr_length), 8);
1226 static void
1227 sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
1228 uint16_t length, int length_idx, boolean_t variable_length, void *userp)
1230 (void) hdr, (void) length_idx, (void) variable_length;
1231 sa_handle_t *hdl = userp;
1232 sa_os_t *sa = hdl->sa_os->os_sa;
1234 sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
1237 static void
1238 sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
1240 sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1241 dmu_buf_impl_t *db;
1242 int num_lengths = 1;
1243 int i;
1244 sa_os_t *sa __maybe_unused = hdl->sa_os->os_sa;
1246 ASSERT(MUTEX_HELD(&sa->sa_lock));
1247 if (sa_hdr_phys->sa_magic == SA_MAGIC)
1248 return;
1250 db = SA_GET_DB(hdl, buftype);
1252 if (buftype == SA_SPILL) {
1253 arc_release(db->db_buf, NULL);
1254 arc_buf_thaw(db->db_buf);
1257 sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
1258 sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
1261  * Determine the number of variable lengths in the header.
1262  * The standard 8 byte header has one for free, and a
1263  * 16 byte header would have 4 + 1.
1265 if (SA_HDR_SIZE(sa_hdr_phys) > 8)
1266 num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
1267 for (i = 0; i != num_lengths; i++)
1268 sa_hdr_phys->sa_lengths[i] =
1269 BSWAP_16(sa_hdr_phys->sa_lengths[i]);
1271 sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
1272 sa_byteswap_cb, NULL, hdl);
1274 if (buftype == SA_SPILL)
1275 arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
1278 static int
1279 sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
1281 sa_hdr_phys_t *sa_hdr_phys;
1282 dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
1283 dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
1284 sa_os_t *sa = hdl->sa_os->os_sa;
1285 sa_idx_tab_t *idx_tab;
1287 sa_hdr_phys = SA_GET_HDR(hdl, buftype);
1289 mutex_enter(&sa->sa_lock);
1291 /* Do we need to byteswap? */
1293 /* only check if not old znode */
1294 if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
1295 sa_hdr_phys->sa_magic != 0) {
1296 if (BSWAP_32(sa_hdr_phys->sa_magic) != SA_MAGIC) {
1297 mutex_exit(&sa->sa_lock);
1298 zfs_dbgmsg("Buffer Header: %x != SA_MAGIC:%x "
1299 "object=%#llx\n", sa_hdr_phys->sa_magic, SA_MAGIC,
1300 (u_longlong_t)db->db.db_object);
1301 return (SET_ERROR(EIO));
1303 sa_byteswap(hdl, buftype);
1306 idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
1308 if (buftype == SA_BONUS)
1309 hdl->sa_bonus_tab = idx_tab;
1310 else
1311 hdl->sa_spill_tab = idx_tab;
1313 mutex_exit(&sa->sa_lock);
1314 return (0);
1317 static void
1318 sa_evict_sync(void *dbu)
1320 (void) dbu;
1321 panic("evicting sa dbuf\n");
1324 static void
1325 sa_idx_tab_rele(objset_t *os, void *arg)
1327 sa_os_t *sa = os->os_sa;
1328 sa_idx_tab_t *idx_tab = arg;
1330 if (idx_tab == NULL)
1331 return;
1333 mutex_enter(&sa->sa_lock);
1334 if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
1335 list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
1336 if (idx_tab->sa_variable_lengths)
1337 kmem_free(idx_tab->sa_variable_lengths,
1338 sizeof (uint16_t) *
1339 idx_tab->sa_layout->lot_var_sizes);
1340 zfs_refcount_destroy(&idx_tab->sa_refcount);
1341 kmem_free(idx_tab->sa_idx_tab,
1342 sizeof (uint32_t) * sa->sa_num_attrs);
1343 kmem_free(idx_tab, sizeof (sa_idx_tab_t));
1345 mutex_exit(&sa->sa_lock);
1348 static void
1349 sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
1351 sa_os_t *sa __maybe_unused = os->os_sa;
1353 ASSERT(MUTEX_HELD(&sa->sa_lock));
1354 (void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
1357 void
1358 sa_spill_rele(sa_handle_t *hdl)
1360 mutex_enter(&hdl->sa_lock);
1361 if (hdl->sa_spill) {
1362 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1363 dmu_buf_rele(hdl->sa_spill, NULL);
1364 hdl->sa_spill = NULL;
1365 hdl->sa_spill_tab = NULL;
1367 mutex_exit(&hdl->sa_lock);
1370 void
1371 sa_handle_destroy(sa_handle_t *hdl)
1373 dmu_buf_t *db = hdl->sa_bonus;
1375 mutex_enter(&hdl->sa_lock);
1376 (void) dmu_buf_remove_user(db, &hdl->sa_dbu);
1378 if (hdl->sa_bonus_tab)
1379 sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
1381 if (hdl->sa_spill_tab)
1382 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
1384 dmu_buf_rele(hdl->sa_bonus, NULL);
1386 if (hdl->sa_spill)
1387 dmu_buf_rele(hdl->sa_spill, NULL);
1388 mutex_exit(&hdl->sa_lock);
1390 kmem_cache_free(sa_cache, hdl);
1394 sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
1395 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1397 int error = 0;
1398 sa_handle_t *handle = NULL;
1399 #ifdef ZFS_DEBUG
1400 dmu_object_info_t doi;
1402 dmu_object_info_from_db(db, &doi);
1403 ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
1404 doi.doi_bonus_type == DMU_OT_ZNODE);
1405 #endif
1406 /* find handle, if it exists */
1407 /* if one doesn't exist then create a new one, and initialize it */
1409 if (hdl_type == SA_HDL_SHARED)
1410 handle = dmu_buf_get_user(db);
1412 if (handle == NULL) {
1413 sa_handle_t *winner = NULL;
1415 handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
1416 handle->sa_dbu.dbu_evict_func_sync = NULL;
1417 handle->sa_dbu.dbu_evict_func_async = NULL;
1418 handle->sa_userp = userp;
1419 handle->sa_bonus = db;
1420 handle->sa_os = os;
1421 handle->sa_spill = NULL;
1422 handle->sa_bonus_tab = NULL;
1423 handle->sa_spill_tab = NULL;
1425 error = sa_build_index(handle, SA_BONUS);
1427 if (hdl_type == SA_HDL_SHARED) {
1428 dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
1429 NULL);
1430 winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
1433 if (winner != NULL) {
1434 kmem_cache_free(sa_cache, handle);
1435 handle = winner;
1438 *handlepp = handle;
1440 return (error);
1444 sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
1445 sa_handle_type_t hdl_type, sa_handle_t **handlepp)
1447 dmu_buf_t *db;
1448 int error;
1450 if ((error = dmu_bonus_hold(objset, objid, NULL, &db)))
1451 return (error);
1453 return (sa_handle_get_from_db(objset, db, userp, hdl_type,
1454 handlepp));
1458 sa_buf_hold(objset_t *objset, uint64_t obj_num, const void *tag, dmu_buf_t **db)
1460 return (dmu_bonus_hold(objset, obj_num, tag, db));
1463 void
1464 sa_buf_rele(dmu_buf_t *db, const void *tag)
1466 dmu_buf_rele(db, tag);
1469 static int
1470 sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
1472 ASSERT(hdl);
1473 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1474 return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
1477 static int
1478 sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
1479 uint32_t buflen)
1481 int error;
1482 sa_bulk_attr_t bulk;
1484 VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
1486 bulk.sa_attr = attr;
1487 bulk.sa_data = buf;
1488 bulk.sa_length = buflen;
1489 bulk.sa_data_func = NULL;
1491 ASSERT(hdl);
1492 error = sa_lookup_impl(hdl, &bulk, 1);
1493 return (error);
1497 sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
1499 int error;
1501 mutex_enter(&hdl->sa_lock);
1502 error = sa_lookup_locked(hdl, attr, buf, buflen);
1503 mutex_exit(&hdl->sa_lock);
1505 return (error);
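
/*
 * Illustrative example of the public lookup path (not code used by this
 * file; the object number and attribute id are placeholders): obtain a
 * handle, look up a fixed-length attribute, then destroy the handle.
 *
 *	sa_handle_t *hdl;
 *	uint64_t size;
 *	int error;
 *
 *	error = sa_handle_get(os, obj, NULL, SA_HDL_PRIVATE, &hdl);
 *	if (error == 0) {
 *		error = sa_lookup(hdl, attr_table[MYFS_SIZE], &size,
 *		    sizeof (size));
 *		sa_handle_destroy(hdl);
 *	}
 */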
1509 * Return size of an attribute
1512 static int
1513 sa_size_locked(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
1515 sa_bulk_attr_t bulk;
1516 int error;
1518 bulk.sa_data = NULL;
1519 bulk.sa_attr = attr;
1520 bulk.sa_data_func = NULL;
1522 ASSERT(hdl);
1523 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1524 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
1525 return (error);
1527 *size = bulk.sa_size;
1529 return (0);
1533 sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
1535 int error;
1537 mutex_enter(&hdl->sa_lock);
1538 error = sa_size_locked(hdl, attr, size);
1539 mutex_exit(&hdl->sa_lock);
1541 return (error);
1544 #ifdef _KERNEL
1546 sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, zfs_uio_t *uio)
1548 int error;
1549 sa_bulk_attr_t bulk;
1551 bulk.sa_data = NULL;
1552 bulk.sa_attr = attr;
1553 bulk.sa_data_func = NULL;
1555 ASSERT(hdl);
1557 mutex_enter(&hdl->sa_lock);
1558 if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
1559 error = zfs_uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
1560 zfs_uio_resid(uio)), UIO_READ, uio);
1562 mutex_exit(&hdl->sa_lock);
1563 return (error);
1567  * For an existing object upgraded from an old system, the on-disk layout
1568  * has no slot for the project ID attribute, but the quota accounting logic
1569  * needs to access that slot by offset directly. So we adjust these old
1570  * objects' layouts to place the project ID at a unified, fixed offset.
1573 sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
1575 znode_t *zp = sa_get_userdata(hdl);
1576 dmu_buf_t *db = sa_get_db(hdl);
1577 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1578 int count = 0, err = 0;
1579 sa_bulk_attr_t *bulk, *attrs;
1580 zfs_acl_locator_cb_t locate = { 0 };
1581 uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
1582 uint64_t crtime[2], mtime[2], ctime[2], atime[2];
1583 zfs_acl_phys_t znode_acl = { 0 };
1584 char scanstamp[AV_SCANSTAMP_SZ];
1585 char *dxattr_obj = NULL;
1586 int dxattr_size = 0;
1588 if (zp->z_acl_cached == NULL) {
1589 zfs_acl_t *aclp;
1591 mutex_enter(&zp->z_acl_lock);
1592 err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
1593 mutex_exit(&zp->z_acl_lock);
1594 if (err != 0 && err != ENOENT)
1595 return (err);
1598 bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1599 attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
1600 mutex_enter(&hdl->sa_lock);
1601 mutex_enter(&zp->z_lock);
1603 err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
1604 sizeof (uint64_t));
1605 if (unlikely(err == 0))
1606 /* Someone else added the project ID attr in a race. */
1607 err = EEXIST;
1608 if (err != ENOENT)
1609 goto out;
1611 /* First do a bulk query of the attributes that aren't cached */
1612 if (zp->z_is_sa) {
1613 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1614 &mode, 8);
1615 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1616 &gen, 8);
1617 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1618 &uid, 8);
1619 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1620 &gid, 8);
1621 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1622 &parent, 8);
1623 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1624 &atime, 16);
1625 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1626 &mtime, 16);
1627 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1628 &ctime, 16);
1629 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1630 &crtime, 16);
1631 if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
1632 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1633 &rdev, 8);
1634 } else {
1635 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
1636 &atime, 16);
1637 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
1638 &mtime, 16);
1639 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
1640 &ctime, 16);
1641 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1642 &crtime, 16);
1643 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
1644 &gen, 8);
1645 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
1646 &mode, 8);
1647 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
1648 &parent, 8);
1649 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
1650 &xattr, 8);
1651 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
1652 &rdev, 8);
1653 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
1654 &uid, 8);
1655 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
1656 &gid, 8);
1657 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
1658 &znode_acl, 88);
1660 err = sa_bulk_lookup_locked(hdl, bulk, count);
1661 if (err != 0)
1662 goto out;
1664 err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
1665 if (err != 0 && err != ENOENT)
1666 goto out;
1668 err = sa_size_locked(hdl, SA_ZPL_DXATTR(zfsvfs), &dxattr_size);
1669 if (err != 0 && err != ENOENT)
1670 goto out;
1671 if (dxattr_size != 0) {
1672 dxattr_obj = vmem_alloc(dxattr_size, KM_SLEEP);
1673 err = sa_lookup_locked(hdl, SA_ZPL_DXATTR(zfsvfs), dxattr_obj,
1674 dxattr_size);
1675 if (err != 0 && err != ENOENT)
1676 goto out;
1679 zp->z_projid = projid;
1680 zp->z_pflags |= ZFS_PROJID;
1681 links = ZTONLNK(zp);
1682 count = 0;
1683 err = 0;
1685 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
1686 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
1687 &zp->z_size, 8);
1688 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
1689 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
1690 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
1691 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
1692 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
1693 &zp->z_pflags, 8);
1694 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
1695 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
1696 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
1697 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
1698 &crtime, 16);
1699 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
1700 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
1702 if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
1703 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
1704 &rdev, 8);
1706 if (zp->z_acl_cached != NULL) {
1707 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
1708 &zp->z_acl_cached->z_acl_count, 8);
1709 if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
1710 zfs_acl_xform(zp, zp->z_acl_cached, CRED());
1711 locate.cb_aclp = zp->z_acl_cached;
1712 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
1713 zfs_acl_data_locator, &locate,
1714 zp->z_acl_cached->z_acl_bytes);
1717 if (xattr)
1718 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
1719 &xattr, 8);
1721 if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
1722 memcpy(scanstamp,
1723 (caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
1724 AV_SCANSTAMP_SZ);
1725 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
1726 scanstamp, AV_SCANSTAMP_SZ);
1727 zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
1730 if (dxattr_obj) {
1731 SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DXATTR(zfsvfs),
1732 NULL, dxattr_obj, dxattr_size);
1735 VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
1736 VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
1737 if (znode_acl.z_acl_extern_obj) {
1738 VERIFY(0 == dmu_object_free(zfsvfs->z_os,
1739 znode_acl.z_acl_extern_obj, tx));
1742 zp->z_is_sa = B_TRUE;
1744 out:
1745 mutex_exit(&zp->z_lock);
1746 mutex_exit(&hdl->sa_lock);
1747 kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
1748 kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
1749 if (dxattr_obj)
1750 vmem_free(dxattr_obj, dxattr_size);
1751 return (err);
1753 #endif
1755 static sa_idx_tab_t *
1756 sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
1758 sa_idx_tab_t *idx_tab;
1759 sa_os_t *sa = os->os_sa;
1760 sa_lot_t *tb, search;
1761 avl_index_t loc;
1764  * Determine layout number. If SA node and header == 0 then
1765 * force the index table to the dummy "1" empty layout.
1767 * The layout number would only be zero for a newly created file
1768 * that has not added any attributes yet, or with crypto enabled which
1769 * doesn't write any attributes to the bonus buffer.
1772 search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
1774 tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
1776 /* Verify header size is consistent with layout information */
1777 ASSERT(tb);
1778 ASSERT((IS_SA_BONUSTYPE(bonustype) &&
1779 SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
1780 (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
1783  * See if any of the already existing TOC entries can be reused.
1786 for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
1787 idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
1788 boolean_t valid_idx = B_TRUE;
1789 int i;
1791 if (tb->lot_var_sizes != 0 &&
1792 idx_tab->sa_variable_lengths != NULL) {
1793 for (i = 0; i != tb->lot_var_sizes; i++) {
1794 if (hdr->sa_lengths[i] !=
1795 idx_tab->sa_variable_lengths[i]) {
1796 valid_idx = B_FALSE;
1797 break;
1801 if (valid_idx) {
1802 sa_idx_tab_hold(os, idx_tab);
1803 return (idx_tab);
1807 /* No such luck, create a new entry */
1808 idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
1809 idx_tab->sa_idx_tab =
1810 kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
1811 idx_tab->sa_layout = tb;
1812 zfs_refcount_create(&idx_tab->sa_refcount);
1813 if (tb->lot_var_sizes)
1814 idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
1815 tb->lot_var_sizes, KM_SLEEP);
1817 sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
1818 tb, idx_tab);
1819 sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
1820 sa_idx_tab_hold(os, idx_tab); /* one for layout */
1821 list_insert_tail(&tb->lot_idx_tab, idx_tab);
1822 return (idx_tab);
1825 void
1826 sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
1827 boolean_t start, void *userdata)
1829 ASSERT(start);
1831 *dataptr = userdata;
1832 *len = total_len;
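
/*
 * A custom locator lets the data come from non-contiguous memory. As a
 * hedged sketch only (the chunk layout and names are hypothetical), a
 * locator that feeds two chunks could look like:
 *
 *	static void
 *	my_two_chunk_locator(void **dataptr, uint32_t *len,
 *	    uint32_t total_len, boolean_t start, void *userdata)
 *	{
 *		my_chunks_t *c = userdata;
 *
 *		if (start) {
 *			*dataptr = c->first;
 *			*len = c->first_len;
 *		} else {
 *			*dataptr = c->second;
 *			*len = total_len - c->first_len;
 *		}
 *	}
 *
 * sa_copy_data() calls the locator repeatedly until it has copied
 * total_len bytes, passing start == B_TRUE only on the first call.
 */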
1835 static void
1836 sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
1838 uint64_t attr_value = 0;
1839 sa_os_t *sa = hdl->sa_os->os_sa;
1840 sa_attr_table_t *tb = sa->sa_attr_table;
1841 int i;
1843 mutex_enter(&sa->sa_lock);
1845 if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
1846 mutex_exit(&sa->sa_lock);
1847 return;
1850 if (sa->sa_reg_attr_obj == 0) {
1851 sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
1852 DMU_OT_SA_ATTR_REGISTRATION,
1853 sa->sa_master_obj, SA_REGISTRY, tx);
1855 for (i = 0; i != sa->sa_num_attrs; i++) {
1856 if (sa->sa_attr_table[i].sa_registered)
1857 continue;
1858 ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
1859 tb[i].sa_byteswap);
1860 VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
1861 tb[i].sa_name, 8, 1, &attr_value, tx));
1862 tb[i].sa_registered = B_TRUE;
1864 sa->sa_need_attr_registration = B_FALSE;
1865 mutex_exit(&sa->sa_lock);
1869  * Replace all attributes with the attributes specified in the template.
1870  * If the dnode had a spill buffer then those attributes will also be
1871  * replaced, possibly with just an empty spill block.
1873  * This interface is intended to be used only for bulk adding of
1874  * attributes for a new file. It will also be used by the ZPL
1875  * when converting an old-format znode to native SA support.
1878 sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1879 int attr_count, dmu_tx_t *tx)
1881 sa_os_t *sa = hdl->sa_os->os_sa;
1883 if (sa->sa_need_attr_registration)
1884 sa_attr_register_sync(hdl, tx);
1885 return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
1889 sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
1890 int attr_count, dmu_tx_t *tx)
1892 int error;
1894 mutex_enter(&hdl->sa_lock);
1895 error = sa_replace_all_by_template_locked(hdl, attr_desc,
1896 attr_count, tx);
1897 mutex_exit(&hdl->sa_lock);
1898 return (error);
1902 * Add/remove a single attribute or replace a variable-sized attribute value
1903 * with a value of a different size, and then rewrite the entire set
1904 * of attributes.
1905 * Same-length attribute value replacement (including fixed-length attributes)
1906 * is handled more efficiently by the upper layers.
1908 static int
1909 sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
1910 sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
1911 uint16_t buflen, dmu_tx_t *tx)
1913 sa_os_t *sa = hdl->sa_os->os_sa;
1914 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1915 sa_bulk_attr_t *attr_desc;
1916 void *old_data[2];
1917 int bonus_attr_count = 0;
1918 int bonus_data_size = 0;
1919 int spill_data_size = 0;
1920 int spill_attr_count = 0;
1921 int error;
1922 uint16_t length, reg_length;
1923 int i, j, k, length_idx;
1924 sa_hdr_phys_t *hdr;
1925 sa_idx_tab_t *idx_tab;
1926 int attr_count;
1927 int count;
1929 ASSERT(MUTEX_HELD(&hdl->sa_lock));
1931 /* First make a copy of the old data */
1933 DB_DNODE_ENTER(db);
1934 if (DB_DNODE(db)->dn_bonuslen != 0) {
1935 bonus_data_size = hdl->sa_bonus->db_size;
1936 old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
1937 memcpy(old_data[0], hdl->sa_bonus->db_data,
1938 hdl->sa_bonus->db_size);
1939 bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
1940 } else {
1941 old_data[0] = NULL;
1943 DB_DNODE_EXIT(db);
1945 /* Bring spill buffer online if it isn't currently */
1947 if ((error = sa_get_spill(hdl)) == 0) {
1948 spill_data_size = hdl->sa_spill->db_size;
1949 old_data[1] = vmem_alloc(spill_data_size, KM_SLEEP);
1950 memcpy(old_data[1], hdl->sa_spill->db_data,
1951 hdl->sa_spill->db_size);
1952 spill_attr_count =
1953 hdl->sa_spill_tab->sa_layout->lot_attr_count;
1954 } else if (error && error != ENOENT) {
1955 if (old_data[0])
1956 kmem_free(old_data[0], bonus_data_size);
1957 return (error);
1958 } else {
1959 old_data[1] = NULL;
1960 }
1961
1962 /* build descriptor of all attributes */
1963
1964 attr_count = bonus_attr_count + spill_attr_count;
1965 if (action == SA_ADD)
1966 attr_count++;
1967 else if (action == SA_REMOVE)
1968 attr_count--;
1969
1970 attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
1971
1972 /*
1973 * Loop through the bonus buffer, and the spill buffer if it exists,
1974 * and build up a new attribute descriptor to reset the attributes.
1975 */
1976 k = j = 0;
1977 count = bonus_attr_count;
1978 hdr = SA_GET_HDR(hdl, SA_BONUS);
1979 idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
1980 for (; ; k++) {
1981 /*
1982 * Iterate over each attribute in layout. Fetch the
1983 * size of variable-length attributes needing rewrite
1984 * from sa_lengths[].
1985 */
1986 for (i = 0, length_idx = 0; i != count; i++) {
1987 sa_attr_type_t attr;
1988
1989 attr = idx_tab->sa_layout->lot_attrs[i];
1990 reg_length = SA_REGISTERED_LEN(sa, attr);
1991 if (reg_length == 0) {
1992 length = hdr->sa_lengths[length_idx];
1993 length_idx++;
1994 } else {
1995 length = reg_length;
1996 }
1997 if (attr == newattr) {
1998 /*
1999 * There is nothing to do for SA_REMOVE,
2000 * so it is just skipped.
2001 */
2002 if (action == SA_REMOVE)
2003 continue;
2004
2005 /*
2006 * Duplicate attributes are not allowed, so the
2007 * action cannot be SA_ADD here.
2008 */
2009 ASSERT3S(action, ==, SA_REPLACE);
2010
2011 /*
2012 * Only a variable-sized attribute can be
2013 * replaced here, and its size must be changing.
2014 */
2015 ASSERT3U(reg_length, ==, 0);
2016 ASSERT3U(length, !=, buflen);
2017 SA_ADD_BULK_ATTR(attr_desc, j, attr,
2018 locator, datastart, buflen);
2019 } else {
2020 SA_ADD_BULK_ATTR(attr_desc, j, attr,
2021 NULL, (void *)
2022 (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
2023 (uintptr_t)old_data[k]), length);
2024 }
2025 }
2026 if (k == 0 && hdl->sa_spill) {
2027 hdr = SA_GET_HDR(hdl, SA_SPILL);
2028 idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
2029 count = spill_attr_count;
2030 } else {
2031 break;
2032 }
2033 }
2034 if (action == SA_ADD) {
2035 reg_length = SA_REGISTERED_LEN(sa, newattr);
2036 IMPLY(reg_length != 0, reg_length == buflen);
2037 SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
2038 datastart, buflen);
2039 }
2040 ASSERT3U(j, ==, attr_count);
2041
2042 error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
2043
2044 if (old_data[0])
2045 kmem_free(old_data[0], bonus_data_size);
2046 if (old_data[1])
2047 vmem_free(old_data[1], spill_data_size);
2048 kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
2049
2050 return (error);
2051 }
2052
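/*
 * Illustrative sketch: sa_modify_attrs() is static, so callers reach it
 * indirectly.  Removing an attribute goes through sa_remove(), while adding
 * or resizing a variable-length attribute typically arrives via
 * sa_update()/sa_bulk_update().  "hdl", "tx" and SA_EXAMPLE_SCANSTAMP are
 * caller-provided placeholders, not names defined in this file.
 *
 *     error = sa_remove(hdl, SA_EXAMPLE_SCANSTAMP, tx);
 */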
2053 static int
2054 sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
2055 dmu_tx_t *tx)
2056 {
2057 int error;
2058 sa_os_t *sa = hdl->sa_os->os_sa;
2059 dmu_object_type_t bonustype;
2060 dmu_buf_t *saved_spill;
2061
2062 ASSERT(hdl);
2063 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2064
2065 bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
2066 saved_spill = hdl->sa_spill;
2067
2068 /* sync out registration table if necessary */
2069 if (sa->sa_need_attr_registration)
2070 sa_attr_register_sync(hdl, tx);
2071
2072 error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
2073 if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
2074 sa->sa_update_cb(hdl, tx);
2075
2076 /*
2077 * If saved_spill is NULL and current sa_spill is not NULL that
2078 * means we increased the refcount of the spill buffer through
2079 * sa_get_spill() or dmu_spill_hold_by_dnode(). Therefore we
2080 * must release the hold before calling dmu_tx_commit() to avoid
2081 * making a copy of this buffer in dbuf_sync_leaf() due to the
2082 * reference count now being greater than 1.
2083 */
2084 if (!saved_spill && hdl->sa_spill) {
2085 if (hdl->sa_spill_tab) {
2086 sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
2087 hdl->sa_spill_tab = NULL;
2088 }
2089
2090 dmu_buf_rele(hdl->sa_spill, NULL);
2091 hdl->sa_spill = NULL;
2092 }
2093
2094 return (error);
2095 }
2096
2097 /*
2098 * Update or add a new attribute.
2099 */
2100 int
2101 sa_update(sa_handle_t *hdl, sa_attr_type_t type,
2102 void *buf, uint32_t buflen, dmu_tx_t *tx)
2103 {
2104 int error;
2105 sa_bulk_attr_t bulk;
2106
2107 VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
2108
2109 bulk.sa_attr = type;
2110 bulk.sa_data_func = NULL;
2111 bulk.sa_length = buflen;
2112 bulk.sa_data = buf;
2113
2114 mutex_enter(&hdl->sa_lock);
2115 error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
2116 mutex_exit(&hdl->sa_lock);
2117 return (error);
2118 }
2119
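/*
 * Illustrative sketch of a single-attribute update.  "hdl", "tx" and the
 * attribute id SA_EXAMPLE_SIZE are placeholders assumed to come from the
 * caller; sa_update() must be called with an assigned dmu_tx.
 *
 *     uint64_t newsize = 4096;
 *     error = sa_update(hdl, SA_EXAMPLE_SIZE, &newsize, sizeof (newsize), tx);
 */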
2120 int
2121 sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2122 {
2123 ASSERT(hdl);
2124 ASSERT(MUTEX_HELD(&hdl->sa_lock));
2125 return (sa_lookup_impl(hdl, attrs, count));
2126 }
2127
2128 int
2129 sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
2130 {
2131 int error;
2132
2133 ASSERT(hdl);
2134 mutex_enter(&hdl->sa_lock);
2135 error = sa_bulk_lookup_locked(hdl, attrs, count);
2136 mutex_exit(&hdl->sa_lock);
2137 return (error);
2138 }
2139
2140 int
2141 sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
2142 {
2143 int error;
2144
2145 ASSERT(hdl);
2146 mutex_enter(&hdl->sa_lock);
2147 error = sa_bulk_update_impl(hdl, attrs, count, tx);
2148 mutex_exit(&hdl->sa_lock);
2149 return (error);
2150 }
2151
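/*
 * Illustrative sketch of a bulk operation.  "hdl", "tx" and the
 * SA_EXAMPLE_* attribute ids are placeholders assumed to be owned by the
 * caller.  The same attrs array can be used to read values with
 * sa_bulk_lookup() and to write them back with sa_bulk_update().
 *
 *     sa_bulk_attr_t attrs[2];
 *     uint64_t atime[2], mtime[2];
 *     int i = 0, error;
 *
 *     SA_ADD_BULK_ATTR(attrs, i, SA_EXAMPLE_ATIME, NULL, &atime, sizeof (atime));
 *     SA_ADD_BULK_ATTR(attrs, i, SA_EXAMPLE_MTIME, NULL, &mtime, sizeof (mtime));
 *     error = sa_bulk_lookup(hdl, attrs, i);
 *     error = sa_bulk_update(hdl, attrs, i, tx);
 */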
2152 int
2153 sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
2154 {
2155 int error;
2156
2157 mutex_enter(&hdl->sa_lock);
2158 error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
2159 NULL, 0, tx);
2160 mutex_exit(&hdl->sa_lock);
2161 return (error);
2162 }
2163
2164 void
2165 sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
2166 {
2167 dmu_object_info_from_db(hdl->sa_bonus, doi);
2168 }
2169
2170 void
2171 sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
2172 {
2173 dmu_object_size_from_db(hdl->sa_bonus,
2174 blksize, nblocks);
2175 }
2176
2177 void
2178 sa_set_userp(sa_handle_t *hdl, void *ptr)
2179 {
2180 hdl->sa_userp = ptr;
2181 }
2182
2183 dmu_buf_t *
2184 sa_get_db(sa_handle_t *hdl)
2185 {
2186 return (hdl->sa_bonus);
2187 }
2188
2189 void *
2190 sa_get_userdata(sa_handle_t *hdl)
2191 {
2192 return (hdl->sa_userp);
2193 }
2194
2195 void
2196 sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
2197 {
2198 ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
2199 os->os_sa->sa_update_cb = func;
2200 }
2201
2202 void
2203 sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
2204 {
2205
2206 mutex_enter(&os->os_sa->sa_lock);
2207 sa_register_update_callback_locked(os, func);
2208 mutex_exit(&os->os_sa->sa_lock);
2209 }
2210
2211 uint64_t
2212 sa_handle_object(sa_handle_t *hdl)
2213 {
2214 return (hdl->sa_bonus->db_object);
2215 }
2216
2217 boolean_t
2218 sa_enabled(objset_t *os)
2219 {
2220 return (os->os_sa == NULL);
2221 }
2222
2223 int
2224 sa_set_sa_object(objset_t *os, uint64_t sa_object)
2225 {
2226 sa_os_t *sa = os->os_sa;
2227
2228 if (sa->sa_master_obj)
2229 return (1);
2230
2231 sa->sa_master_obj = sa_object;
2232
2233 return (0);
2234 }
2235
2236 int
2237 sa_hdrsize(void *arg)
2238 {
2239 sa_hdr_phys_t *hdr = arg;
2240
2241 return (SA_HDR_SIZE(hdr));
2242 }
2243
2244 void
2245 sa_handle_lock(sa_handle_t *hdl)
2246 {
2247 ASSERT(hdl);
2248 mutex_enter(&hdl->sa_lock);
2249 }
2250
2251 void
2252 sa_handle_unlock(sa_handle_t *hdl)
2253 {
2254 ASSERT(hdl);
2255 mutex_exit(&hdl->sa_lock);
2256 }
2257
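/*
 * Illustrative sketch: sa_handle_lock()/sa_handle_unlock() let a caller
 * hold the handle lock across several *_locked calls.  "hdl", "attrs" and
 * "count" are placeholders assumed to be set up by the caller.
 *
 *     sa_handle_lock(hdl);
 *     error = sa_bulk_lookup_locked(hdl, attrs, count);
 *     sa_handle_unlock(hdl);
 */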
2258 #ifdef _KERNEL
2259 EXPORT_SYMBOL(sa_handle_get);
2260 EXPORT_SYMBOL(sa_handle_get_from_db);
2261 EXPORT_SYMBOL(sa_handle_destroy);
2262 EXPORT_SYMBOL(sa_buf_hold);
2263 EXPORT_SYMBOL(sa_buf_rele);
2264 EXPORT_SYMBOL(sa_spill_rele);
2265 EXPORT_SYMBOL(sa_lookup);
2266 EXPORT_SYMBOL(sa_update);
2267 EXPORT_SYMBOL(sa_remove);
2268 EXPORT_SYMBOL(sa_bulk_lookup);
2269 EXPORT_SYMBOL(sa_bulk_lookup_locked);
2270 EXPORT_SYMBOL(sa_bulk_update);
2271 EXPORT_SYMBOL(sa_size);
2272 EXPORT_SYMBOL(sa_object_info);
2273 EXPORT_SYMBOL(sa_object_size);
2274 EXPORT_SYMBOL(sa_get_userdata);
2275 EXPORT_SYMBOL(sa_set_userp);
2276 EXPORT_SYMBOL(sa_get_db);
2277 EXPORT_SYMBOL(sa_handle_object);
2278 EXPORT_SYMBOL(sa_register_update_callback);
2279 EXPORT_SYMBOL(sa_setup);
2280 EXPORT_SYMBOL(sa_replace_all_by_template);
2281 EXPORT_SYMBOL(sa_replace_all_by_template_locked);
2282 EXPORT_SYMBOL(sa_enabled);
2283 EXPORT_SYMBOL(sa_cache_init);
2284 EXPORT_SYMBOL(sa_cache_fini);
2285 EXPORT_SYMBOL(sa_set_sa_object);
2286 EXPORT_SYMBOL(sa_hdrsize);
2287 EXPORT_SYMBOL(sa_handle_lock);
2288 EXPORT_SYMBOL(sa_handle_unlock);
2289 EXPORT_SYMBOL(sa_lookup_uio);
2290 EXPORT_SYMBOL(sa_add_projid);
2291 #endif /* _KERNEL */