/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2023 RackTop Systems, Inc.
 */
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/sunddi.h>
#include <sys/sa_impl.h>
#include <sys/errno.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
/*
 * ZFS System attributes:
 *
 * A generic mechanism to allow for arbitrary attributes
 * to be stored in a dnode. The data will be stored in the bonus buffer of
 * the dnode and if necessary a special "spill" block will be used to handle
 * overflow situations. The spill block will be sized to fit the data
 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
 * spill block is stored at the end of the current bonus buffer. Any
 * attributes that would be in the way of the blkptr_t will be relocated
 * into the spill block.
 *
 * Attribute registration:
 *
 * Stored persistently on a per-dataset basis is a mapping between
 * attribute "string" names and their actual attribute numeric values,
 * length, and byteswap function. The names are only used during
 * registration. All attributes are known by their unique attribute id
 * value. If an attribute can have a variable size then a registered
 * length of 0 is used to indicate this.
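 *
 * For illustration only (a sketch, not part of the original comment), a
 * registration entry pairs a name with a length and byteswap type, as in
 * the sa_legacy_attrs[] table below:
 *
 *	{ "ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, ... }
 *	{ "ZPL_HYPOTHETICAL", 0, SA_UINT8_ARRAY, ... }	(variable sized)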
 *
 * Attribute Layouts:
 *
 * Attribute layouts are a way to compactly store multiple attributes
 * without incurring the overhead associated with managing each attribute
 * individually. Since you will typically have the same set of attributes
 * stored in the same order, a single table is used to represent that
 * layout. The ZPL, for example, will usually have only about 10 different
 * layouts (regular files, device files, symlinks,
 * regular files + scanstamp, files/dirs with extended attributes, and then
 * all of those minus the ACL, because it would
 * be kicked out into the spill block).
 *
 * Layouts are simply an array of the attributes and their
 * ordering i.e. [0, 1, 4, 5, 2]
 *
 * Each distinct layout is given a unique layout number and that is what's
 * stored in the header at the beginning of the SA data buffer.
 *
 * A layout only covers a single dbuf (bonus or spill). If a set of
 * attributes is split up between the bonus buffer and a spill buffer then
 * two different layouts will be used. This allows us to byteswap the
 * spill without looking at the bonus buffer and keeps the on-disk format of
 * the bonus and spill buffer the same.
 *
 * Adding a single attribute will cause the entire set of attributes to
 * be rewritten and could result in a new layout number being constructed
 * as part of the rewrite if no such layout exists for the new set of
 * attributes. The new attribute will be appended to the end of the already
 * existing attributes.
 *
 * Both the attribute registration and attribute layout information are
 * stored in normal ZAP attributes. There should be a small number of
 * known layouts and the set of attributes is assumed to typically be quite
 * small.
 *
 * The registered attributes and layout "table" information is maintained
 * in core and a special "sa_os_t" is attached to the objset_t.
 *
 * A special interface is provided to allow for quickly applying
 * a large set of attributes at once. sa_replace_all_by_template() is
 * used to set an array of attributes. This is used by the ZPL when
 * creating a brand new file. The template that is passed into the function
 * specifies the attribute, the size for variable length attributes, the
 * location of the data, and a special "data locator" function for use when
 * the data isn't in a contiguous location.
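 *
 * As a rough sketch (illustrative only; the attribute ids come from
 * whatever table the caller registered through sa_setup()):
 *
 *	sa_bulk_attr_t attrs[16];
 *	int count = 0;
 *	SA_ADD_BULK_ATTR(attrs, count, size_attr_id, NULL, &size, 8);
 *	...				(add the remaining attributes)
 *	err = sa_replace_all_by_template(hdl, attrs, count, tx);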
 *
 * Byteswap implications:
 *
 * Since the SA attributes are not entirely self-describing we can't do
 * the normal byteswap processing. The special ZAP layout attribute and
 * attribute registration attributes define the byteswap function and the
 * size of each attribute, unless it is variable sized.
 * The normal ZFS byteswapping infrastructure assumes you don't need
 * to read any objects in order to do the necessary byteswapping, whereas
 * SA attributes can only be properly byteswapped if the dataset is opened
 * and the layout/attribute ZAP attributes are available. Because of this,
 * the SA attributes will be byteswapped when they are first accessed by
 * the SA code that reads the SA data.
 */
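
/*
 * Illustrative usage sketch (compiled out; the attribute id below is a
 * placeholder that a real consumer would have obtained from sa_setup()).
 * It shows the basic handle-get / lookup / destroy pattern built on the
 * routines defined later in this file.
 */
#if 0
static int
sa_example_read_u64(objset_t *os, uint64_t obj, sa_attr_type_t attr_id,
    uint64_t *valp)
{
	sa_handle_t *hdl;
	int error;

	/* Hold the object's bonus buffer and attach a private SA handle. */
	error = sa_handle_get(os, obj, NULL, SA_HDL_PRIVATE, &hdl);
	if (error != 0)
		return (error);

	/* Copy out a single fixed-size (8 byte) attribute. */
	error = sa_lookup(hdl, attr_id, valp, sizeof (*valp));

	sa_handle_destroy(hdl);
	return (error);
}
#endif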
typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
    uint16_t length, int length_idx, boolean_t, void *userp);

static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
    sa_hdr_phys_t *hdr);
static void sa_idx_tab_rele(objset_t *os, void *arg);
static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
    int buflen);
static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
    sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
    uint16_t buflen, dmu_tx_t *tx);

static arc_byteswap_func_t sa_bswap_table[] = {
	byteswap_uint64_array,
	byteswap_uint32_array,
	byteswap_uint16_array,
	byteswap_uint8_array,
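
/*
 * SA_COPY_DATA() moves attribute data into place.  When no data-locator
 * callback is supplied and unaligned accesses are cheap, the common 8- and
 * 16-byte fixed-size copies are inlined below; everything else falls back
 * to sa_copy_data().
 */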
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
#define	SA_COPY_DATA(f, s, t, l) \
			*(uint64_t *)t = *(uint64_t *)s; \
		} else if (l == 16) { \
			*(uint64_t *)t = *(uint64_t *)s; \
			*(uint64_t *)((uintptr_t)t + 8) = \
			    *(uint64_t *)((uintptr_t)s + 8); \
		sa_copy_data(f, s, t, l); \
#define	SA_COPY_DATA(f, s, t, l) sa_copy_data(f, s, t, l)

/*
 * This table is fixed and cannot be changed. Its purpose is to
 * allow the SA code to work with both old/new ZPL file systems.
 * It contains the list of legacy attributes. These attributes aren't
 * stored in the "attribute" registry zap objects, since older ZPL file systems
 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
 * use this static table.
 */
static const sa_attr_reg_t sa_legacy_attrs[] = {
	{"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
	{"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
	{"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
	{"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
	{"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
	{"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
	{"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
	{"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
	{"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
	{"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
	{"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
	{"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
	{"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
	{"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
	{"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
	{"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
};

/*
 * This is only used for objects of type DMU_OT_ZNODE
 */
static const sa_attr_type_t sa_legacy_zpl_layout[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

/*
 * Special dummy layout used for buffers with no attributes.
 */
static const sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };

static const size_t sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs);
static kmem_cache_t *sa_cache = NULL;
sa_cache_constructor(void *buf, void *unused, int kmflag)
	(void) unused, (void) kmflag;
	sa_handle_t *hdl = buf;

	mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);

sa_cache_destructor(void *buf, void *unused)
	sa_handle_t *hdl = buf;

	mutex_destroy(&hdl->sa_lock);

	sa_cache = kmem_cache_create("sa_cache",
	    sizeof (sa_handle_t), 0, sa_cache_constructor,
	    sa_cache_destructor, NULL, NULL, NULL, KMC_RECLAIMABLE);

	kmem_cache_destroy(sa_cache);
layout_num_compare(const void *arg1, const void *arg2)
	const sa_lot_t *node1 = (const sa_lot_t *)arg1;
	const sa_lot_t *node2 = (const sa_lot_t *)arg2;

	return (TREE_CMP(node1->lot_num, node2->lot_num));

layout_hash_compare(const void *arg1, const void *arg2)
	const sa_lot_t *node1 = (const sa_lot_t *)arg1;
	const sa_lot_t *node2 = (const sa_lot_t *)arg2;

	int cmp = TREE_CMP(node1->lot_hash, node2->lot_hash);

	return (TREE_CMP(node1->lot_instance, node2->lot_instance));

sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
	if (count != tbf->lot_attr_count)

	for (i = 0; i != count; i++) {
		if (attrs[i] != tbf->lot_attrs[i])

#define	SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])

sa_layout_info_hash(const sa_attr_type_t *attrs, int attr_count)
	uint64_t crc = -1ULL;

	for (int i = 0; i != attr_count; i++)
		crc ^= SA_ATTR_HASH(attrs[i]);
sa_get_spill(sa_handle_t *hdl)
	if (hdl->sa_spill == NULL) {
		if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
		    &hdl->sa_spill)) == 0)
			VERIFY(0 == sa_build_index(hdl, SA_SPILL));
/*
 * Main attribute lookup/update function
 * returns 0 for success or non-zero for failures
 *
 * Operates on bulk array, first failure will abort further processing
 */
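/*
 * Note: the single-attribute wrappers later in this file (sa_lookup(),
 * sa_update(), sa_size()) simply build a one-entry sa_bulk_attr_t and
 * funnel it through here with the appropriate sa_data_op_t.
 */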
sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
    sa_data_op_t data_op, dmu_tx_t *tx)
	sa_os_t *sa = hdl->sa_os->os_sa;
	sa_buf_type_t buftypes;

	for (i = 0; i != count; i++) {
		ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);

		bulk[i].sa_addr = NULL;
		/* First check the bonus buffer */

		if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
		    hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
			SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
			    SA_GET_HDR(hdl, SA_BONUS),
			    bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
			if (tx && !(buftypes & SA_BONUS)) {
				dmu_buf_will_dirty(hdl->sa_bonus, tx);
				buftypes |= SA_BONUS;

		if (bulk[i].sa_addr == NULL &&
		    ((error = sa_get_spill(hdl)) == 0)) {
			if (TOC_ATTR_PRESENT(
			    hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
				SA_ATTR_INFO(sa, hdl->sa_spill_tab,
				    SA_GET_HDR(hdl, SA_SPILL),
				    bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
				if (tx && !(buftypes & SA_SPILL) &&
				    bulk[i].sa_size == bulk[i].sa_length) {
					dmu_buf_will_dirty(hdl->sa_spill, tx);
					buftypes |= SA_SPILL;

		if (error && error != ENOENT) {
			return ((error == ECKSUM) ? EIO : error);

			if (bulk[i].sa_addr == NULL)
				return (SET_ERROR(ENOENT));
			if (bulk[i].sa_data) {
				SA_COPY_DATA(bulk[i].sa_data_func,
				    bulk[i].sa_addr, bulk[i].sa_data,
				    MIN(bulk[i].sa_size, bulk[i].sa_length));

			/* existing rewrite of attr */
			if (bulk[i].sa_addr &&
			    bulk[i].sa_size == bulk[i].sa_length) {
				SA_COPY_DATA(bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_addr,
			} else if (bulk[i].sa_addr) { /* attr size change */
				error = sa_modify_attrs(hdl, bulk[i].sa_attr,
				    SA_REPLACE, bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_length, tx);
			} else { /* adding new attribute */
				error = sa_modify_attrs(hdl, bulk[i].sa_attr,
				    SA_ADD, bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_length, tx);

sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
    uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
	sa_os_t *sa = os->os_sa;
	sa_lot_t *tb, *findtb;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
	tb->lot_attr_count = attr_count;
	tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
	memcpy(tb->lot_attrs, attrs, sizeof (sa_attr_type_t) * attr_count);
	tb->lot_num = lot_num;
	tb->lot_instance = 0;

		if (sa->sa_layout_attr_obj == 0) {
			sa->sa_layout_attr_obj = zap_create_link(os,
			    DMU_OT_SA_ATTR_LAYOUTS,
			    sa->sa_master_obj, SA_LAYOUTS, tx);
		(void) snprintf(attr_name, sizeof (attr_name),
		VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
		    attr_name, 2, attr_count, attrs, tx));

	list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
	    offsetof(sa_idx_tab_t, sa_next));

	for (i = 0; i != attr_count; i++) {
		if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)

	avl_add(&sa->sa_layout_num_tree, tb);

	/* verify we don't have a hash collision */
	if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
		for (; findtb && findtb->lot_hash == hash;
		    findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
			if (findtb->lot_instance != tb->lot_instance)

	avl_add(&sa->sa_layout_hash_tree, tb);

sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
    int count, dmu_tx_t *tx, sa_lot_t **lot)
	sa_lot_t *tb, tbsearch;
	sa_os_t *sa = os->os_sa;
	boolean_t found = B_FALSE;

	mutex_enter(&sa->sa_lock);
	tbsearch.lot_hash = hash;
	tbsearch.lot_instance = 0;
	tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
	for (; tb && tb->lot_hash == hash;
	    tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
		if (sa_layout_equal(tb, attrs, count) == 0) {
		tb = sa_add_layout_entry(os, attrs, count,
		    avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
	mutex_exit(&sa->sa_lock);

sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
		blocksize = SPA_MINBLOCKSIZE;
	} else if (size > SPA_OLD_MAXBLOCKSIZE) {
		return (SET_ERROR(EFBIG));
		blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);

	error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
		memcpy(target, datastart, buflen);
		void *saptr = target;

		while (bytes < buflen) {
			func(&dataptr, &length, buflen, start, datastart);
			memcpy(saptr, dataptr, length);
			saptr = (void *)((caddr_t)saptr + length);

/*
 * Determine several different values pertaining to system attribute
 * buffers.
 *
 * Return the size of the sa_hdr_phys_t header for the buffer. Each
 * variable length attribute except the first contributes two bytes to
 * the header size, which is then rounded up to an 8-byte boundary.
 *
 * The following output parameters are also computed.
 *
 *  index - The index of the first attribute in attr_desc that will
 *          spill over. Only valid if will_spill is set.
 *
 *  total - The total number of bytes of all system attributes described
 *          in attr_desc.
 *
 *  will_spill - Set when spilling is necessary. It is only set when
 *               the buftype is SA_BONUS.
 */
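/*
 * Worked example (illustrative): with DMU_OT_SA the header starts at
 * sizeof (sa_hdr_phys_t), i.e. 8 bytes.  Three variable-length attributes
 * add 2 bytes each for all but the first, giving 12, which P2ROUNDUP()
 * rounds to a 16-byte header.
 */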
sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
    dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
    int *total, boolean_t *will_spill)
	int var_size_count = 0;

	if (buftype == SA_BONUS && sa->sa_force_spill) {
		*will_spill = B_TRUE;

	*will_spill = B_FALSE;

	hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
	    sizeof (sa_hdr_phys_t);

	ASSERT(IS_P2ALIGNED(full_space, 8));

	for (i = 0; i != attr_count; i++) {
		boolean_t is_var_sz, might_spill_here;

		*total = P2ROUNDUP(*total, 8);
		*total += attr_desc[i].sa_length;

		is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);

		/*
		 * Calculate what the SA header size would be if this
		 * attribute doesn't spill.
		 */
		tmp_hdrsize = hdrsize + ((is_var_sz && var_size_count > 1) ?
		    sizeof (uint16_t) : 0);

		/*
		 * Check whether this attribute spans into the space
		 * that would be used by the spill block pointer should
		 * a spill block be needed.
		 */
		might_spill_here =
		    buftype == SA_BONUS && *index == -1 &&
		    (*total + P2ROUNDUP(tmp_hdrsize, 8)) >
		    (full_space - sizeof (blkptr_t));

		if (is_var_sz && var_size_count > 1) {
			if (buftype == SA_SPILL ||
			    tmp_hdrsize + *total < full_space) {
				/*
				 * Record the extra header size in case this
				 * increase needs to be reversed due to
				 * spill-over.
				 */
				hdrsize = tmp_hdrsize;
				if (*index != -1 || might_spill_here)
					extra_hdrsize += sizeof (uint16_t);
				ASSERT(buftype == SA_BONUS);
				*will_spill = B_TRUE;

		/*
		 * Store index of where spill *could* occur. Then
		 * continue to count the remaining attribute sizes. The
		 * sum is used later for sizing bonus and spill buffer.
		 */
		if (might_spill_here)

	if ((*total + P2ROUNDUP(hdrsize, 8)) > full_space &&
		*will_spill = B_TRUE;

		hdrsize -= extra_hdrsize;

	hdrsize = P2ROUNDUP(hdrsize, 8);

#define	BUF_SPACE_NEEDED(total, header) (total + header)

/*
 * Find layout that corresponds to ordering of attributes
 * If not found a new layout number is created and added to
 * persistent layout tables.
 */
659 sa_build_layouts(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
, int attr_count
,
662 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
664 sa_buf_type_t buftype
;
665 sa_hdr_phys_t
*sahdr
;
667 sa_attr_type_t
*attrs
, *attrs_start
;
672 int spillhdrsize
= 0;
674 dmu_object_type_t bonustype
;
681 dmu_buf_will_dirty(hdl
->sa_bonus
, tx
);
682 bonustype
= SA_BONUSTYPE_FROM_DB(hdl
->sa_bonus
);
683 dmu_object_dnsize_from_db(hdl
->sa_bonus
, &dnodesize
);
684 bonuslen
= DN_BONUS_SIZE(dnodesize
);
686 /* first determine bonus header size and sum of all attributes */
687 hdrsize
= sa_find_sizes(sa
, attr_desc
, attr_count
, hdl
->sa_bonus
,
688 SA_BONUS
, bonuslen
, &spill_idx
, &used
, &spilling
);
690 if (used
> SPA_OLD_MAXBLOCKSIZE
)
691 return (SET_ERROR(EFBIG
));
693 VERIFY0(dmu_set_bonus(hdl
->sa_bonus
, spilling
?
694 MIN(bonuslen
- sizeof (blkptr_t
), used
+ hdrsize
) :
695 used
+ hdrsize
, tx
));
697 ASSERT((bonustype
== DMU_OT_ZNODE
&& spilling
== 0) ||
698 bonustype
== DMU_OT_SA
);
700 /* setup and size spill buffer when needed */
704 if (hdl
->sa_spill
== NULL
) {
705 VERIFY(dmu_spill_hold_by_bonus(hdl
->sa_bonus
, 0, NULL
,
706 &hdl
->sa_spill
) == 0);
708 dmu_buf_will_dirty(hdl
->sa_spill
, tx
);
710 spillhdrsize
= sa_find_sizes(sa
, &attr_desc
[spill_idx
],
711 attr_count
- spill_idx
, hdl
->sa_spill
, SA_SPILL
,
712 hdl
->sa_spill
->db_size
, &i
, &spill_used
, &dummy
);
714 if (spill_used
> SPA_OLD_MAXBLOCKSIZE
)
715 return (SET_ERROR(EFBIG
));
717 if (BUF_SPACE_NEEDED(spill_used
, spillhdrsize
) >
718 hdl
->sa_spill
->db_size
)
719 VERIFY(0 == sa_resize_spill(hdl
,
720 BUF_SPACE_NEEDED(spill_used
, spillhdrsize
), tx
));
723 /* setup starting pointers to lay down data */
724 data_start
= (void *)((uintptr_t)hdl
->sa_bonus
->db_data
+ hdrsize
);
725 sahdr
= (sa_hdr_phys_t
*)hdl
->sa_bonus
->db_data
;
728 attrs_start
= attrs
= kmem_alloc(sizeof (sa_attr_type_t
) * attr_count
,
732 for (i
= 0, len_idx
= 0, hash
= -1ULL; i
!= attr_count
; i
++) {
735 ASSERT(IS_P2ALIGNED(data_start
, 8));
736 attrs
[i
] = attr_desc
[i
].sa_attr
;
737 length
= SA_REGISTERED_LEN(sa
, attrs
[i
]);
739 length
= attr_desc
[i
].sa_length
;
741 if (spilling
&& i
== spill_idx
) { /* switch to spill buffer */
742 VERIFY(bonustype
== DMU_OT_SA
);
743 if (buftype
== SA_BONUS
&& !sa
->sa_force_spill
) {
744 sa_find_layout(hdl
->sa_os
, hash
, attrs_start
,
745 lot_count
, tx
, &lot
);
746 SA_SET_HDR(sahdr
, lot
->lot_num
, hdrsize
);
753 sahdr
= (sa_hdr_phys_t
*)hdl
->sa_spill
->db_data
;
754 sahdr
->sa_magic
= SA_MAGIC
;
755 data_start
= (void *)((uintptr_t)sahdr
+
757 attrs_start
= &attrs
[i
];
760 hash
^= SA_ATTR_HASH(attrs
[i
]);
761 attr_desc
[i
].sa_addr
= data_start
;
762 attr_desc
[i
].sa_size
= length
;
763 SA_COPY_DATA(attr_desc
[i
].sa_data_func
, attr_desc
[i
].sa_data
,
765 if (sa
->sa_attr_table
[attrs
[i
]].sa_length
== 0) {
766 sahdr
->sa_lengths
[len_idx
++] = length
;
768 data_start
= (void *)P2ROUNDUP(((uintptr_t)data_start
+
773 sa_find_layout(hdl
->sa_os
, hash
, attrs_start
, lot_count
, tx
, &lot
);
776 * Verify that old znodes always have layout number 0.
777 * Must be DMU_OT_SA for arbitrary layouts
779 VERIFY((bonustype
== DMU_OT_ZNODE
&& lot
->lot_num
== 0) ||
780 (bonustype
== DMU_OT_SA
&& lot
->lot_num
> 1));
782 if (bonustype
== DMU_OT_SA
) {
783 SA_SET_HDR(sahdr
, lot
->lot_num
,
784 buftype
== SA_BONUS
? hdrsize
: spillhdrsize
);
787 kmem_free(attrs
, sizeof (sa_attr_type_t
) * attr_count
);
788 if (hdl
->sa_bonus_tab
) {
789 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_bonus_tab
);
790 hdl
->sa_bonus_tab
= NULL
;
792 if (!sa
->sa_force_spill
)
793 VERIFY(0 == sa_build_index(hdl
, SA_BONUS
));
795 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
798 * remove spill block that is no longer needed.
800 dmu_buf_rele(hdl
->sa_spill
, NULL
);
801 hdl
->sa_spill
= NULL
;
802 hdl
->sa_spill_tab
= NULL
;
803 VERIFY(0 == dmu_rm_spill(hdl
->sa_os
,
804 sa_handle_object(hdl
), tx
));
806 VERIFY(0 == sa_build_index(hdl
, SA_SPILL
));
814 sa_free_attr_table(sa_os_t
*sa
)
818 if (sa
->sa_attr_table
== NULL
)
821 for (i
= 0; i
!= sa
->sa_num_attrs
; i
++) {
822 if (sa
->sa_attr_table
[i
].sa_name
)
823 kmem_free(sa
->sa_attr_table
[i
].sa_name
,
824 strlen(sa
->sa_attr_table
[i
].sa_name
) + 1);
827 kmem_free(sa
->sa_attr_table
,
828 sizeof (sa_attr_table_t
) * sa
->sa_num_attrs
);
830 sa
->sa_attr_table
= NULL
;
834 sa_attr_table_setup(objset_t
*os
, const sa_attr_reg_t
*reg_attrs
, int count
)
836 sa_os_t
*sa
= os
->os_sa
;
837 uint64_t sa_attr_count
= 0;
838 uint64_t sa_reg_count
= 0;
844 int registered_count
= 0;
846 dmu_objset_type_t ostype
= dmu_objset_type(os
);
849 kmem_zalloc(count
* sizeof (sa_attr_type_t
), KM_SLEEP
);
850 sa
->sa_user_table_sz
= count
* sizeof (sa_attr_type_t
);
852 if (sa
->sa_reg_attr_obj
!= 0) {
853 error
= zap_count(os
, sa
->sa_reg_attr_obj
,
857 * Make sure we retrieved a count and that it isn't zero
859 if (error
|| (error
== 0 && sa_attr_count
== 0)) {
861 error
= SET_ERROR(EINVAL
);
864 sa_reg_count
= sa_attr_count
;
867 if (ostype
== DMU_OST_ZFS
&& sa_attr_count
== 0)
868 sa_attr_count
+= sa_legacy_attr_count
;
870 /* Allocate attribute numbers for attributes that aren't registered */
871 for (i
= 0; i
!= count
; i
++) {
872 boolean_t found
= B_FALSE
;
875 if (ostype
== DMU_OST_ZFS
) {
876 for (j
= 0; j
!= sa_legacy_attr_count
; j
++) {
877 if (strcmp(reg_attrs
[i
].sa_name
,
878 sa_legacy_attrs
[j
].sa_name
) == 0) {
879 sa
->sa_user_table
[i
] =
880 sa_legacy_attrs
[j
].sa_attr
;
888 if (sa
->sa_reg_attr_obj
)
889 error
= zap_lookup(os
, sa
->sa_reg_attr_obj
,
890 reg_attrs
[i
].sa_name
, 8, 1, &attr_value
);
892 error
= SET_ERROR(ENOENT
);
895 sa
->sa_user_table
[i
] = (sa_attr_type_t
)sa_attr_count
;
899 sa
->sa_user_table
[i
] = ATTR_NUM(attr_value
);
906 sa
->sa_num_attrs
= sa_attr_count
;
907 tb
= sa
->sa_attr_table
=
908 kmem_zalloc(sizeof (sa_attr_table_t
) * sa_attr_count
, KM_SLEEP
);
911 * Attribute table is constructed from requested attribute list,
912 * previously foreign registered attributes, and also the legacy
913 * ZPL set of attributes.
916 if (sa
->sa_reg_attr_obj
) {
917 for (zap_cursor_init(&zc
, os
, sa
->sa_reg_attr_obj
);
918 (error
= zap_cursor_retrieve(&zc
, &za
)) == 0;
919 zap_cursor_advance(&zc
)) {
921 value
= za
.za_first_integer
;
924 tb
[ATTR_NUM(value
)].sa_attr
= ATTR_NUM(value
);
925 tb
[ATTR_NUM(value
)].sa_length
= ATTR_LENGTH(value
);
926 tb
[ATTR_NUM(value
)].sa_byteswap
= ATTR_BSWAP(value
);
927 tb
[ATTR_NUM(value
)].sa_registered
= B_TRUE
;
929 if (tb
[ATTR_NUM(value
)].sa_name
) {
932 tb
[ATTR_NUM(value
)].sa_name
=
933 kmem_zalloc(strlen(za
.za_name
) +1, KM_SLEEP
);
934 (void) strlcpy(tb
[ATTR_NUM(value
)].sa_name
, za
.za_name
,
935 strlen(za
.za_name
) +1);
937 zap_cursor_fini(&zc
);
939 * Make sure we processed the correct number of registered
942 if (registered_count
!= sa_reg_count
) {
949 if (ostype
== DMU_OST_ZFS
) {
950 for (i
= 0; i
!= sa_legacy_attr_count
; i
++) {
953 tb
[i
].sa_attr
= sa_legacy_attrs
[i
].sa_attr
;
954 tb
[i
].sa_length
= sa_legacy_attrs
[i
].sa_length
;
955 tb
[i
].sa_byteswap
= sa_legacy_attrs
[i
].sa_byteswap
;
956 tb
[i
].sa_registered
= B_FALSE
;
958 kmem_zalloc(strlen(sa_legacy_attrs
[i
].sa_name
) +1,
960 (void) strlcpy(tb
[i
].sa_name
,
961 sa_legacy_attrs
[i
].sa_name
,
962 strlen(sa_legacy_attrs
[i
].sa_name
) + 1);
966 for (i
= 0; i
!= count
; i
++) {
967 sa_attr_type_t attr_id
;
969 attr_id
= sa
->sa_user_table
[i
];
970 if (tb
[attr_id
].sa_name
)
973 tb
[attr_id
].sa_length
= reg_attrs
[i
].sa_length
;
974 tb
[attr_id
].sa_byteswap
= reg_attrs
[i
].sa_byteswap
;
975 tb
[attr_id
].sa_attr
= attr_id
;
976 tb
[attr_id
].sa_name
=
977 kmem_zalloc(strlen(reg_attrs
[i
].sa_name
) + 1, KM_SLEEP
);
978 (void) strlcpy(tb
[attr_id
].sa_name
, reg_attrs
[i
].sa_name
,
979 strlen(reg_attrs
[i
].sa_name
) + 1);
982 sa
->sa_need_attr_registration
=
983 (sa_attr_count
!= registered_count
);
987 kmem_free(sa
->sa_user_table
, count
* sizeof (sa_attr_type_t
));
988 sa
->sa_user_table
= NULL
;
989 sa_free_attr_table(sa
);
995 sa_setup(objset_t
*os
, uint64_t sa_obj
, const sa_attr_reg_t
*reg_attrs
,
996 int count
, sa_attr_type_t
**user_table
)
1001 dmu_objset_type_t ostype
= dmu_objset_type(os
);
1005 mutex_enter(&os
->os_user_ptr_lock
);
1007 mutex_enter(&os
->os_sa
->sa_lock
);
1008 mutex_exit(&os
->os_user_ptr_lock
);
1009 tb
= os
->os_sa
->sa_user_table
;
1010 mutex_exit(&os
->os_sa
->sa_lock
);
1015 sa
= kmem_zalloc(sizeof (sa_os_t
), KM_SLEEP
);
1016 mutex_init(&sa
->sa_lock
, NULL
, MUTEX_NOLOCKDEP
, NULL
);
1017 sa
->sa_master_obj
= sa_obj
;
1020 mutex_enter(&sa
->sa_lock
);
1021 mutex_exit(&os
->os_user_ptr_lock
);
1022 avl_create(&sa
->sa_layout_num_tree
, layout_num_compare
,
1023 sizeof (sa_lot_t
), offsetof(sa_lot_t
, lot_num_node
));
1024 avl_create(&sa
->sa_layout_hash_tree
, layout_hash_compare
,
1025 sizeof (sa_lot_t
), offsetof(sa_lot_t
, lot_hash_node
));
1028 error
= zap_lookup(os
, sa_obj
, SA_LAYOUTS
,
1029 8, 1, &sa
->sa_layout_attr_obj
);
1030 if (error
!= 0 && error
!= ENOENT
)
1032 error
= zap_lookup(os
, sa_obj
, SA_REGISTRY
,
1033 8, 1, &sa
->sa_reg_attr_obj
);
1034 if (error
!= 0 && error
!= ENOENT
)
1038 if ((error
= sa_attr_table_setup(os
, reg_attrs
, count
)) != 0)
1041 if (sa
->sa_layout_attr_obj
!= 0) {
1042 uint64_t layout_count
;
1044 error
= zap_count(os
, sa
->sa_layout_attr_obj
,
1048 * Layout number count should be > 0
1050 if (error
|| (error
== 0 && layout_count
== 0)) {
1052 error
= SET_ERROR(EINVAL
);
1056 for (zap_cursor_init(&zc
, os
, sa
->sa_layout_attr_obj
);
1057 (error
= zap_cursor_retrieve(&zc
, &za
)) == 0;
1058 zap_cursor_advance(&zc
)) {
1059 sa_attr_type_t
*lot_attrs
;
1062 lot_attrs
= kmem_zalloc(sizeof (sa_attr_type_t
) *
1063 za
.za_num_integers
, KM_SLEEP
);
1065 if ((error
= (zap_lookup(os
, sa
->sa_layout_attr_obj
,
1066 za
.za_name
, 2, za
.za_num_integers
,
1067 lot_attrs
))) != 0) {
1068 kmem_free(lot_attrs
, sizeof (sa_attr_type_t
) *
1069 za
.za_num_integers
);
1072 VERIFY0(ddi_strtoull(za
.za_name
, NULL
, 10,
1073 (unsigned long long *)&lot_num
));
1075 (void) sa_add_layout_entry(os
, lot_attrs
,
1076 za
.za_num_integers
, lot_num
,
1077 sa_layout_info_hash(lot_attrs
,
1078 za
.za_num_integers
), B_FALSE
, NULL
);
1079 kmem_free(lot_attrs
, sizeof (sa_attr_type_t
) *
1080 za
.za_num_integers
);
1082 zap_cursor_fini(&zc
);
1085 * Make sure layout count matches number of entries added
1088 if (avl_numnodes(&sa
->sa_layout_num_tree
) != layout_count
) {
1094 /* Add special layout number for old ZNODES */
1095 if (ostype
== DMU_OST_ZFS
) {
1096 (void) sa_add_layout_entry(os
, sa_legacy_zpl_layout
,
1097 sa_legacy_attr_count
, 0,
1098 sa_layout_info_hash(sa_legacy_zpl_layout
,
1099 sa_legacy_attr_count
), B_FALSE
, NULL
);
1101 (void) sa_add_layout_entry(os
, sa_dummy_zpl_layout
, 0, 1,
1104 *user_table
= os
->os_sa
->sa_user_table
;
1105 mutex_exit(&sa
->sa_lock
);
1109 sa_free_attr_table(sa
);
1110 if (sa
->sa_user_table
)
1111 kmem_free(sa
->sa_user_table
, sa
->sa_user_table_sz
);
1112 mutex_exit(&sa
->sa_lock
);
1113 avl_destroy(&sa
->sa_layout_hash_tree
);
1114 avl_destroy(&sa
->sa_layout_num_tree
);
1115 mutex_destroy(&sa
->sa_lock
);
1116 kmem_free(sa
, sizeof (sa_os_t
));
1117 return ((error
== ECKSUM
) ? EIO
: error
);
1121 sa_tear_down(objset_t
*os
)
1123 sa_os_t
*sa
= os
->os_sa
;
1127 kmem_free(sa
->sa_user_table
, sa
->sa_user_table_sz
);
1129 /* Free up attr table */
1131 sa_free_attr_table(sa
);
1135 avl_destroy_nodes(&sa
->sa_layout_hash_tree
, &cookie
))) {
1137 while ((tab
= list_head(&layout
->lot_idx_tab
))) {
1138 ASSERT(zfs_refcount_count(&tab
->sa_refcount
));
1139 sa_idx_tab_rele(os
, tab
);
1144 while ((layout
= avl_destroy_nodes(&sa
->sa_layout_num_tree
, &cookie
))) {
1145 kmem_free(layout
->lot_attrs
,
1146 sizeof (sa_attr_type_t
) * layout
->lot_attr_count
);
1147 kmem_free(layout
, sizeof (sa_lot_t
));
1150 avl_destroy(&sa
->sa_layout_hash_tree
);
1151 avl_destroy(&sa
->sa_layout_num_tree
);
1152 mutex_destroy(&sa
->sa_lock
);
1154 kmem_free(sa
, sizeof (sa_os_t
));
1159 sa_build_idx_tab(void *hdr
, void *attr_addr
, sa_attr_type_t attr
,
1160 uint16_t length
, int length_idx
, boolean_t var_length
, void *userp
)
1162 sa_idx_tab_t
*idx_tab
= userp
;
1165 ASSERT(idx_tab
->sa_variable_lengths
);
1166 idx_tab
->sa_variable_lengths
[length_idx
] = length
;
1168 TOC_ATTR_ENCODE(idx_tab
->sa_idx_tab
[attr
], length_idx
,
1169 (uint32_t)((uintptr_t)attr_addr
- (uintptr_t)hdr
));
1173 sa_attr_iter(objset_t
*os
, sa_hdr_phys_t
*hdr
, dmu_object_type_t type
,
1174 sa_iterfunc_t func
, sa_lot_t
*tab
, void *userp
)
1180 sa_os_t
*sa
= os
->os_sa
;
1182 uint16_t *length_start
= NULL
;
1183 uint8_t length_idx
= 0;
1186 search
.lot_num
= SA_LAYOUT_NUM(hdr
, type
);
1187 tb
= avl_find(&sa
->sa_layout_num_tree
, &search
, &loc
);
1191 if (IS_SA_BONUSTYPE(type
)) {
1192 data_start
= (void *)P2ROUNDUP(((uintptr_t)hdr
+
1193 offsetof(sa_hdr_phys_t
, sa_lengths
) +
1194 (sizeof (uint16_t) * tb
->lot_var_sizes
)), 8);
1195 length_start
= hdr
->sa_lengths
;
1200 for (i
= 0; i
!= tb
->lot_attr_count
; i
++) {
1201 int attr_length
, reg_length
;
1204 reg_length
= sa
->sa_attr_table
[tb
->lot_attrs
[i
]].sa_length
;
1205 IMPLY(reg_length
== 0, IS_SA_BONUSTYPE(type
));
1207 attr_length
= reg_length
;
1210 attr_length
= length_start
[length_idx
];
1211 idx_len
= length_idx
++;
1214 func(hdr
, data_start
, tb
->lot_attrs
[i
], attr_length
,
1215 idx_len
, reg_length
== 0 ? B_TRUE
: B_FALSE
, userp
);
1217 data_start
= (void *)P2ROUNDUP(((uintptr_t)data_start
+
1223 sa_byteswap_cb(void *hdr
, void *attr_addr
, sa_attr_type_t attr
,
1224 uint16_t length
, int length_idx
, boolean_t variable_length
, void *userp
)
1226 (void) hdr
, (void) length_idx
, (void) variable_length
;
1227 sa_handle_t
*hdl
= userp
;
1228 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1230 sa_bswap_table
[sa
->sa_attr_table
[attr
].sa_byteswap
](attr_addr
, length
);
1234 sa_byteswap(sa_handle_t
*hdl
, sa_buf_type_t buftype
)
1236 sa_hdr_phys_t
*sa_hdr_phys
= SA_GET_HDR(hdl
, buftype
);
1238 int num_lengths
= 1;
1240 sa_os_t
*sa __maybe_unused
= hdl
->sa_os
->os_sa
;
1242 ASSERT(MUTEX_HELD(&sa
->sa_lock
));
1243 if (sa_hdr_phys
->sa_magic
== SA_MAGIC
)
1246 db
= SA_GET_DB(hdl
, buftype
);
1248 if (buftype
== SA_SPILL
) {
1249 arc_release(db
->db_buf
, NULL
);
1250 arc_buf_thaw(db
->db_buf
);
1253 sa_hdr_phys
->sa_magic
= BSWAP_32(sa_hdr_phys
->sa_magic
);
1254 sa_hdr_phys
->sa_layout_info
= BSWAP_16(sa_hdr_phys
->sa_layout_info
);
	/*
	 * Determine number of variable lengths in header
	 * The standard 8 byte header has one for free and a
	 * 16 byte header would have 4 + 1;
	 */
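	/*
	 * e.g. a 16-byte header yields (16 - 8) >> 1 == 4 additional
	 * sa_lengths[] slots, five in total.
	 */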
1261 if (SA_HDR_SIZE(sa_hdr_phys
) > 8)
1262 num_lengths
+= (SA_HDR_SIZE(sa_hdr_phys
) - 8) >> 1;
1263 for (i
= 0; i
!= num_lengths
; i
++)
1264 sa_hdr_phys
->sa_lengths
[i
] =
1265 BSWAP_16(sa_hdr_phys
->sa_lengths
[i
]);
1267 sa_attr_iter(hdl
->sa_os
, sa_hdr_phys
, DMU_OT_SA
,
1268 sa_byteswap_cb
, NULL
, hdl
);
1270 if (buftype
== SA_SPILL
)
1271 arc_buf_freeze(((dmu_buf_impl_t
*)hdl
->sa_spill
)->db_buf
);
1275 sa_build_index(sa_handle_t
*hdl
, sa_buf_type_t buftype
)
1277 sa_hdr_phys_t
*sa_hdr_phys
;
1278 dmu_buf_impl_t
*db
= SA_GET_DB(hdl
, buftype
);
1279 dmu_object_type_t bonustype
= SA_BONUSTYPE_FROM_DB(db
);
1280 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1281 sa_idx_tab_t
*idx_tab
;
1283 sa_hdr_phys
= SA_GET_HDR(hdl
, buftype
);
1285 mutex_enter(&sa
->sa_lock
);
1287 /* Do we need to byteswap? */
1289 /* only check if not old znode */
1290 if (IS_SA_BONUSTYPE(bonustype
) && sa_hdr_phys
->sa_magic
!= SA_MAGIC
&&
1291 sa_hdr_phys
->sa_magic
!= 0) {
1292 if (BSWAP_32(sa_hdr_phys
->sa_magic
) != SA_MAGIC
) {
1293 mutex_exit(&sa
->sa_lock
);
1294 zfs_dbgmsg("Buffer Header: %x != SA_MAGIC:%x "
1295 "object=%#llx\n", sa_hdr_phys
->sa_magic
, SA_MAGIC
,
1296 (u_longlong_t
)db
->db
.db_object
);
1297 return (SET_ERROR(EIO
));
1299 sa_byteswap(hdl
, buftype
);
1302 idx_tab
= sa_find_idx_tab(hdl
->sa_os
, bonustype
, sa_hdr_phys
);
1304 if (buftype
== SA_BONUS
)
1305 hdl
->sa_bonus_tab
= idx_tab
;
1307 hdl
->sa_spill_tab
= idx_tab
;
1309 mutex_exit(&sa
->sa_lock
);
1314 sa_evict_sync(void *dbu
)
1317 panic("evicting sa dbuf\n");
1321 sa_idx_tab_rele(objset_t
*os
, void *arg
)
1323 sa_os_t
*sa
= os
->os_sa
;
1324 sa_idx_tab_t
*idx_tab
= arg
;
1326 if (idx_tab
== NULL
)
1329 mutex_enter(&sa
->sa_lock
);
1330 if (zfs_refcount_remove(&idx_tab
->sa_refcount
, NULL
) == 0) {
1331 list_remove(&idx_tab
->sa_layout
->lot_idx_tab
, idx_tab
);
1332 if (idx_tab
->sa_variable_lengths
)
1333 kmem_free(idx_tab
->sa_variable_lengths
,
1335 idx_tab
->sa_layout
->lot_var_sizes
);
1336 zfs_refcount_destroy(&idx_tab
->sa_refcount
);
1337 kmem_free(idx_tab
->sa_idx_tab
,
1338 sizeof (uint32_t) * sa
->sa_num_attrs
);
1339 kmem_free(idx_tab
, sizeof (sa_idx_tab_t
));
1341 mutex_exit(&sa
->sa_lock
);
1345 sa_idx_tab_hold(objset_t
*os
, sa_idx_tab_t
*idx_tab
)
1347 sa_os_t
*sa __maybe_unused
= os
->os_sa
;
1349 ASSERT(MUTEX_HELD(&sa
->sa_lock
));
1350 (void) zfs_refcount_add(&idx_tab
->sa_refcount
, NULL
);
1354 sa_spill_rele(sa_handle_t
*hdl
)
1356 mutex_enter(&hdl
->sa_lock
);
1357 if (hdl
->sa_spill
) {
1358 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
1359 dmu_buf_rele(hdl
->sa_spill
, NULL
);
1360 hdl
->sa_spill
= NULL
;
1361 hdl
->sa_spill_tab
= NULL
;
1363 mutex_exit(&hdl
->sa_lock
);
1367 sa_handle_destroy(sa_handle_t
*hdl
)
1369 dmu_buf_t
*db
= hdl
->sa_bonus
;
1371 mutex_enter(&hdl
->sa_lock
);
1372 (void) dmu_buf_remove_user(db
, &hdl
->sa_dbu
);
1374 if (hdl
->sa_bonus_tab
)
1375 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_bonus_tab
);
1377 if (hdl
->sa_spill_tab
)
1378 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
1380 dmu_buf_rele(hdl
->sa_bonus
, NULL
);
1383 dmu_buf_rele(hdl
->sa_spill
, NULL
);
1384 mutex_exit(&hdl
->sa_lock
);
1386 kmem_cache_free(sa_cache
, hdl
);
1390 sa_handle_get_from_db(objset_t
*os
, dmu_buf_t
*db
, void *userp
,
1391 sa_handle_type_t hdl_type
, sa_handle_t
**handlepp
)
1394 sa_handle_t
*handle
= NULL
;
1396 dmu_object_info_t doi
;
1398 dmu_object_info_from_db(db
, &doi
);
1399 ASSERT(doi
.doi_bonus_type
== DMU_OT_SA
||
1400 doi
.doi_bonus_type
== DMU_OT_ZNODE
);
1402 /* find handle, if it exists */
1403 /* if one doesn't exist then create a new one, and initialize it */
1405 if (hdl_type
== SA_HDL_SHARED
)
1406 handle
= dmu_buf_get_user(db
);
1408 if (handle
== NULL
) {
1409 sa_handle_t
*winner
= NULL
;
1411 handle
= kmem_cache_alloc(sa_cache
, KM_SLEEP
);
1412 handle
->sa_dbu
.dbu_evict_func_sync
= NULL
;
1413 handle
->sa_dbu
.dbu_evict_func_async
= NULL
;
1414 handle
->sa_userp
= userp
;
1415 handle
->sa_bonus
= db
;
1417 handle
->sa_spill
= NULL
;
1418 handle
->sa_bonus_tab
= NULL
;
1419 handle
->sa_spill_tab
= NULL
;
1421 error
= sa_build_index(handle
, SA_BONUS
);
1423 if (hdl_type
== SA_HDL_SHARED
) {
1424 dmu_buf_init_user(&handle
->sa_dbu
, sa_evict_sync
, NULL
,
1426 winner
= dmu_buf_set_user_ie(db
, &handle
->sa_dbu
);
1429 if (winner
!= NULL
) {
1430 kmem_cache_free(sa_cache
, handle
);
1440 sa_handle_get(objset_t
*objset
, uint64_t objid
, void *userp
,
1441 sa_handle_type_t hdl_type
, sa_handle_t
**handlepp
)
1446 if ((error
= dmu_bonus_hold(objset
, objid
, NULL
, &db
)))
1449 return (sa_handle_get_from_db(objset
, db
, userp
, hdl_type
,
1454 sa_buf_hold(objset_t
*objset
, uint64_t obj_num
, const void *tag
, dmu_buf_t
**db
)
1456 return (dmu_bonus_hold(objset
, obj_num
, tag
, db
));
1460 sa_buf_rele(dmu_buf_t
*db
, const void *tag
)
1462 dmu_buf_rele(db
, tag
);
1466 sa_lookup_impl(sa_handle_t
*hdl
, sa_bulk_attr_t
*bulk
, int count
)
1469 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
1470 return (sa_attr_op(hdl
, bulk
, count
, SA_LOOKUP
, NULL
));
1474 sa_lookup_locked(sa_handle_t
*hdl
, sa_attr_type_t attr
, void *buf
,
1478 sa_bulk_attr_t bulk
;
1480 VERIFY3U(buflen
, <=, SA_ATTR_MAX_LEN
);
1482 bulk
.sa_attr
= attr
;
1484 bulk
.sa_length
= buflen
;
1485 bulk
.sa_data_func
= NULL
;
1488 error
= sa_lookup_impl(hdl
, &bulk
, 1);
1493 sa_lookup(sa_handle_t
*hdl
, sa_attr_type_t attr
, void *buf
, uint32_t buflen
)
1497 mutex_enter(&hdl
->sa_lock
);
1498 error
= sa_lookup_locked(hdl
, attr
, buf
, buflen
);
1499 mutex_exit(&hdl
->sa_lock
);
1506 sa_lookup_uio(sa_handle_t
*hdl
, sa_attr_type_t attr
, zfs_uio_t
*uio
)
1509 sa_bulk_attr_t bulk
;
1511 bulk
.sa_data
= NULL
;
1512 bulk
.sa_attr
= attr
;
1513 bulk
.sa_data_func
= NULL
;
1517 mutex_enter(&hdl
->sa_lock
);
1518 if ((error
= sa_attr_op(hdl
, &bulk
, 1, SA_LOOKUP
, NULL
)) == 0) {
1519 error
= zfs_uiomove((void *)bulk
.sa_addr
, MIN(bulk
.sa_size
,
1520 zfs_uio_resid(uio
)), UIO_READ
, uio
);
1522 mutex_exit(&hdl
->sa_lock
);
/*
 * For an existing object that was upgraded from an old system, the on-disk
 * layout has no slot for the project ID attribute. But the quota accounting
 * logic needs to access the related slots by offset directly. So we need to
 * adjust these old objects' layout to move the project ID to a unified and
 * fixed offset.
 */
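/*
 * The fixed offset is achieved by regenerating the layout: the existing
 * attributes are gathered below and rewritten together with the new
 * project ID via sa_replace_all_by_template_locked().
 */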
1533 sa_add_projid(sa_handle_t
*hdl
, dmu_tx_t
*tx
, uint64_t projid
)
1535 znode_t
*zp
= sa_get_userdata(hdl
);
1536 dmu_buf_t
*db
= sa_get_db(hdl
);
1537 zfsvfs_t
*zfsvfs
= ZTOZSB(zp
);
1538 int count
= 0, err
= 0;
1539 sa_bulk_attr_t
*bulk
, *attrs
;
1540 zfs_acl_locator_cb_t locate
= { 0 };
1541 uint64_t uid
, gid
, mode
, rdev
, xattr
= 0, parent
, gen
, links
;
1542 uint64_t crtime
[2], mtime
[2], ctime
[2], atime
[2];
1543 zfs_acl_phys_t znode_acl
= { 0 };
1544 char scanstamp
[AV_SCANSTAMP_SZ
];
1546 if (zp
->z_acl_cached
== NULL
) {
1549 mutex_enter(&zp
->z_acl_lock
);
1550 err
= zfs_acl_node_read(zp
, B_FALSE
, &aclp
, B_FALSE
);
1551 mutex_exit(&zp
->z_acl_lock
);
1552 if (err
!= 0 && err
!= ENOENT
)
1556 bulk
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * ZPL_END
, KM_SLEEP
);
1557 attrs
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * ZPL_END
, KM_SLEEP
);
1558 mutex_enter(&hdl
->sa_lock
);
1559 mutex_enter(&zp
->z_lock
);
1561 err
= sa_lookup_locked(hdl
, SA_ZPL_PROJID(zfsvfs
), &projid
,
1563 if (unlikely(err
== 0))
1564 /* Someone has added project ID attr by race. */
1569 /* First do a bulk query of the attributes that aren't cached */
1571 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
), NULL
,
1573 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GEN(zfsvfs
), NULL
,
1575 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_UID(zfsvfs
), NULL
,
1577 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GID(zfsvfs
), NULL
,
1579 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
,
1581 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
,
1583 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
,
1585 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
,
1587 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1589 if (Z_ISBLK(ZTOTYPE(zp
)) || Z_ISCHR(ZTOTYPE(zp
)))
1590 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1593 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
,
1595 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
,
1597 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
,
1599 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1601 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GEN(zfsvfs
), NULL
,
1603 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
), NULL
,
1605 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
,
1607 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_XATTR(zfsvfs
), NULL
,
1609 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1611 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_UID(zfsvfs
), NULL
,
1613 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GID(zfsvfs
), NULL
,
1615 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ZNODE_ACL(zfsvfs
), NULL
,
1618 err
= sa_bulk_lookup_locked(hdl
, bulk
, count
);
1622 err
= sa_lookup_locked(hdl
, SA_ZPL_XATTR(zfsvfs
), &xattr
, 8);
1623 if (err
!= 0 && err
!= ENOENT
)
1626 zp
->z_projid
= projid
;
1627 zp
->z_pflags
|= ZFS_PROJID
;
1628 links
= ZTONLNK(zp
);
1632 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_MODE(zfsvfs
), NULL
, &mode
, 8);
1633 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_SIZE(zfsvfs
), NULL
,
1635 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_GEN(zfsvfs
), NULL
, &gen
, 8);
1636 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_UID(zfsvfs
), NULL
, &uid
, 8);
1637 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_GID(zfsvfs
), NULL
, &gid
, 8);
1638 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
, &parent
, 8);
1639 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_FLAGS(zfsvfs
), NULL
,
1641 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
, &atime
, 16);
1642 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
, &mtime
, 16);
1643 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
, &ctime
, 16);
1644 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1646 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_LINKS(zfsvfs
), NULL
, &links
, 8);
1647 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_PROJID(zfsvfs
), NULL
, &projid
, 8);
1649 if (Z_ISBLK(ZTOTYPE(zp
)) || Z_ISCHR(ZTOTYPE(zp
)))
1650 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1653 if (zp
->z_acl_cached
!= NULL
) {
1654 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_DACL_COUNT(zfsvfs
), NULL
,
1655 &zp
->z_acl_cached
->z_acl_count
, 8);
1656 if (zp
->z_acl_cached
->z_version
< ZFS_ACL_VERSION_FUID
)
1657 zfs_acl_xform(zp
, zp
->z_acl_cached
, CRED());
1658 locate
.cb_aclp
= zp
->z_acl_cached
;
1659 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_DACL_ACES(zfsvfs
),
1660 zfs_acl_data_locator
, &locate
,
1661 zp
->z_acl_cached
->z_acl_bytes
);
1665 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_XATTR(zfsvfs
), NULL
,
1668 if (zp
->z_pflags
& ZFS_BONUS_SCANSTAMP
) {
1670 (caddr_t
)db
->db_data
+ ZFS_OLD_ZNODE_PHYS_SIZE
,
1672 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_SCANSTAMP(zfsvfs
), NULL
,
1673 scanstamp
, AV_SCANSTAMP_SZ
);
1674 zp
->z_pflags
&= ~ZFS_BONUS_SCANSTAMP
;
1677 VERIFY(dmu_set_bonustype(db
, DMU_OT_SA
, tx
) == 0);
1678 VERIFY(sa_replace_all_by_template_locked(hdl
, attrs
, count
, tx
) == 0);
1679 if (znode_acl
.z_acl_extern_obj
) {
1680 VERIFY(0 == dmu_object_free(zfsvfs
->z_os
,
1681 znode_acl
.z_acl_extern_obj
, tx
));
1684 zp
->z_is_sa
= B_TRUE
;
1687 mutex_exit(&zp
->z_lock
);
1688 mutex_exit(&hdl
->sa_lock
);
1689 kmem_free(attrs
, sizeof (sa_bulk_attr_t
) * ZPL_END
);
1690 kmem_free(bulk
, sizeof (sa_bulk_attr_t
) * ZPL_END
);
1695 static sa_idx_tab_t
*
1696 sa_find_idx_tab(objset_t
*os
, dmu_object_type_t bonustype
, sa_hdr_phys_t
*hdr
)
1698 sa_idx_tab_t
*idx_tab
;
1699 sa_os_t
*sa
= os
->os_sa
;
1700 sa_lot_t
*tb
, search
;
1704 * Deterimine layout number. If SA node and header == 0 then
1705 * force the index table to the dummy "1" empty layout.
1707 * The layout number would only be zero for a newly created file
1708 * that has not added any attributes yet, or with crypto enabled which
1709 * doesn't write any attributes to the bonus buffer.
1712 search
.lot_num
= SA_LAYOUT_NUM(hdr
, bonustype
);
1714 tb
= avl_find(&sa
->sa_layout_num_tree
, &search
, &loc
);
1716 /* Verify header size is consistent with layout information */
1718 ASSERT((IS_SA_BONUSTYPE(bonustype
) &&
1719 SA_HDR_SIZE_MATCH_LAYOUT(hdr
, tb
)) || !IS_SA_BONUSTYPE(bonustype
) ||
1720 (IS_SA_BONUSTYPE(bonustype
) && hdr
->sa_layout_info
== 0));
1723 * See if any of the already existing TOC entries can be reused?
1726 for (idx_tab
= list_head(&tb
->lot_idx_tab
); idx_tab
;
1727 idx_tab
= list_next(&tb
->lot_idx_tab
, idx_tab
)) {
1728 boolean_t valid_idx
= B_TRUE
;
1731 if (tb
->lot_var_sizes
!= 0 &&
1732 idx_tab
->sa_variable_lengths
!= NULL
) {
1733 for (i
= 0; i
!= tb
->lot_var_sizes
; i
++) {
1734 if (hdr
->sa_lengths
[i
] !=
1735 idx_tab
->sa_variable_lengths
[i
]) {
1736 valid_idx
= B_FALSE
;
1742 sa_idx_tab_hold(os
, idx_tab
);
1747 /* No such luck, create a new entry */
1748 idx_tab
= kmem_zalloc(sizeof (sa_idx_tab_t
), KM_SLEEP
);
1749 idx_tab
->sa_idx_tab
=
1750 kmem_zalloc(sizeof (uint32_t) * sa
->sa_num_attrs
, KM_SLEEP
);
1751 idx_tab
->sa_layout
= tb
;
1752 zfs_refcount_create(&idx_tab
->sa_refcount
);
1753 if (tb
->lot_var_sizes
)
1754 idx_tab
->sa_variable_lengths
= kmem_alloc(sizeof (uint16_t) *
1755 tb
->lot_var_sizes
, KM_SLEEP
);
1757 sa_attr_iter(os
, hdr
, bonustype
, sa_build_idx_tab
,
1759 sa_idx_tab_hold(os
, idx_tab
); /* one hold for consumer */
1760 sa_idx_tab_hold(os
, idx_tab
); /* one for layout */
1761 list_insert_tail(&tb
->lot_idx_tab
, idx_tab
);
1766 sa_default_locator(void **dataptr
, uint32_t *len
, uint32_t total_len
,
1767 boolean_t start
, void *userdata
)
1771 *dataptr
= userdata
;
1776 sa_attr_register_sync(sa_handle_t
*hdl
, dmu_tx_t
*tx
)
1778 uint64_t attr_value
= 0;
1779 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1780 sa_attr_table_t
*tb
= sa
->sa_attr_table
;
1783 mutex_enter(&sa
->sa_lock
);
1785 if (!sa
->sa_need_attr_registration
|| sa
->sa_master_obj
== 0) {
1786 mutex_exit(&sa
->sa_lock
);
1790 if (sa
->sa_reg_attr_obj
== 0) {
1791 sa
->sa_reg_attr_obj
= zap_create_link(hdl
->sa_os
,
1792 DMU_OT_SA_ATTR_REGISTRATION
,
1793 sa
->sa_master_obj
, SA_REGISTRY
, tx
);
1795 for (i
= 0; i
!= sa
->sa_num_attrs
; i
++) {
1796 if (sa
->sa_attr_table
[i
].sa_registered
)
1798 ATTR_ENCODE(attr_value
, tb
[i
].sa_attr
, tb
[i
].sa_length
,
1800 VERIFY(0 == zap_update(hdl
->sa_os
, sa
->sa_reg_attr_obj
,
1801 tb
[i
].sa_name
, 8, 1, &attr_value
, tx
));
1802 tb
[i
].sa_registered
= B_TRUE
;
1804 sa
->sa_need_attr_registration
= B_FALSE
;
1805 mutex_exit(&sa
->sa_lock
);
1809 * Replace all attributes with attributes specified in template.
1810 * If dnode had a spill buffer then those attributes will be
1811 * also be replaced, possibly with just an empty spill block
1813 * This interface is intended to only be used for bulk adding of
1814 * attributes for a new file. It will also be used by the ZPL
1815 * when converting and old formatted znode to native SA support.
1818 sa_replace_all_by_template_locked(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
,
1819 int attr_count
, dmu_tx_t
*tx
)
1821 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1823 if (sa
->sa_need_attr_registration
)
1824 sa_attr_register_sync(hdl
, tx
);
1825 return (sa_build_layouts(hdl
, attr_desc
, attr_count
, tx
));
1829 sa_replace_all_by_template(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
,
1830 int attr_count
, dmu_tx_t
*tx
)
1834 mutex_enter(&hdl
->sa_lock
);
1835 error
= sa_replace_all_by_template_locked(hdl
, attr_desc
,
1837 mutex_exit(&hdl
->sa_lock
);
/*
 * Add/remove a single attribute or replace a variable-sized attribute value
 * with a value of a different size, and then rewrite the entire set
 * of attributes.
 *
 * Same-length attribute value replacement (including fixed-length attributes)
 * is handled more efficiently by the upper layers.
 */
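/*
 * Reached from sa_attr_op() when an attribute is being added or its size
 * is changing, and from sa_remove() with SA_REMOVE.
 */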
1849 sa_modify_attrs(sa_handle_t
*hdl
, sa_attr_type_t newattr
,
1850 sa_data_op_t action
, sa_data_locator_t
*locator
, void *datastart
,
1851 uint16_t buflen
, dmu_tx_t
*tx
)
1853 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1854 dmu_buf_impl_t
*db
= (dmu_buf_impl_t
*)hdl
->sa_bonus
;
1855 sa_bulk_attr_t
*attr_desc
;
1857 int bonus_attr_count
= 0;
1858 int bonus_data_size
= 0;
1859 int spill_data_size
= 0;
1860 int spill_attr_count
= 0;
1862 uint16_t length
, reg_length
;
1863 int i
, j
, k
, length_idx
;
1865 sa_idx_tab_t
*idx_tab
;
1869 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
1871 /* First make of copy of the old data */
1874 if (DB_DNODE(db
)->dn_bonuslen
!= 0) {
1875 bonus_data_size
= hdl
->sa_bonus
->db_size
;
1876 old_data
[0] = kmem_alloc(bonus_data_size
, KM_SLEEP
);
1877 memcpy(old_data
[0], hdl
->sa_bonus
->db_data
,
1878 hdl
->sa_bonus
->db_size
);
1879 bonus_attr_count
= hdl
->sa_bonus_tab
->sa_layout
->lot_attr_count
;
1885 /* Bring spill buffer online if it isn't currently */
1887 if ((error
= sa_get_spill(hdl
)) == 0) {
1888 spill_data_size
= hdl
->sa_spill
->db_size
;
1889 old_data
[1] = vmem_alloc(spill_data_size
, KM_SLEEP
);
1890 memcpy(old_data
[1], hdl
->sa_spill
->db_data
,
1891 hdl
->sa_spill
->db_size
);
1893 hdl
->sa_spill_tab
->sa_layout
->lot_attr_count
;
1894 } else if (error
&& error
!= ENOENT
) {
1896 kmem_free(old_data
[0], bonus_data_size
);
1902 /* build descriptor of all attributes */
1904 attr_count
= bonus_attr_count
+ spill_attr_count
;
1905 if (action
== SA_ADD
)
1907 else if (action
== SA_REMOVE
)
1910 attr_desc
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * attr_count
, KM_SLEEP
);
1913 * loop through bonus and spill buffer if it exists, and
1914 * build up new attr_descriptor to reset the attributes
1917 count
= bonus_attr_count
;
1918 hdr
= SA_GET_HDR(hdl
, SA_BONUS
);
1919 idx_tab
= SA_IDX_TAB_GET(hdl
, SA_BONUS
);
1922 * Iterate over each attribute in layout. Fetch the
1923 * size of variable-length attributes needing rewrite
1924 * from sa_lengths[].
1926 for (i
= 0, length_idx
= 0; i
!= count
; i
++) {
1927 sa_attr_type_t attr
;
1929 attr
= idx_tab
->sa_layout
->lot_attrs
[i
];
1930 reg_length
= SA_REGISTERED_LEN(sa
, attr
);
1931 if (reg_length
== 0) {
1932 length
= hdr
->sa_lengths
[length_idx
];
1935 length
= reg_length
;
1937 if (attr
== newattr
) {
1939 * There is nothing to do for SA_REMOVE,
1940 * so it is just skipped.
1942 if (action
== SA_REMOVE
)
1946 * Duplicate attributes are not allowed, so the
1947 * action can not be SA_ADD here.
1949 ASSERT3S(action
, ==, SA_REPLACE
);
1952 * Only a variable-sized attribute can be
1953 * replaced here, and its size must be changing.
1955 ASSERT3U(reg_length
, ==, 0);
1956 ASSERT3U(length
, !=, buflen
);
1957 SA_ADD_BULK_ATTR(attr_desc
, j
, attr
,
1958 locator
, datastart
, buflen
);
1960 SA_ADD_BULK_ATTR(attr_desc
, j
, attr
,
1962 (TOC_OFF(idx_tab
->sa_idx_tab
[attr
]) +
1963 (uintptr_t)old_data
[k
]), length
);
1966 if (k
== 0 && hdl
->sa_spill
) {
1967 hdr
= SA_GET_HDR(hdl
, SA_SPILL
);
1968 idx_tab
= SA_IDX_TAB_GET(hdl
, SA_SPILL
);
1969 count
= spill_attr_count
;
1974 if (action
== SA_ADD
) {
1975 reg_length
= SA_REGISTERED_LEN(sa
, newattr
);
1976 IMPLY(reg_length
!= 0, reg_length
== buflen
);
1977 SA_ADD_BULK_ATTR(attr_desc
, j
, newattr
, locator
,
1980 ASSERT3U(j
, ==, attr_count
);
1982 error
= sa_build_layouts(hdl
, attr_desc
, attr_count
, tx
);
1985 kmem_free(old_data
[0], bonus_data_size
);
1987 vmem_free(old_data
[1], spill_data_size
);
1988 kmem_free(attr_desc
, sizeof (sa_bulk_attr_t
) * attr_count
);
1994 sa_bulk_update_impl(sa_handle_t
*hdl
, sa_bulk_attr_t
*bulk
, int count
,
1998 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1999 dmu_object_type_t bonustype
;
2000 dmu_buf_t
*saved_spill
;
2003 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
2005 bonustype
= SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl
, SA_BONUS
));
2006 saved_spill
= hdl
->sa_spill
;
2008 /* sync out registration table if necessary */
2009 if (sa
->sa_need_attr_registration
)
2010 sa_attr_register_sync(hdl
, tx
);
2012 error
= sa_attr_op(hdl
, bulk
, count
, SA_UPDATE
, tx
);
2013 if (error
== 0 && !IS_SA_BONUSTYPE(bonustype
) && sa
->sa_update_cb
)
2014 sa
->sa_update_cb(hdl
, tx
);
2017 * If saved_spill is NULL and current sa_spill is not NULL that
2018 * means we increased the refcount of the spill buffer through
2019 * sa_get_spill() or dmu_spill_hold_by_dnode(). Therefore we
2020 * must release the hold before calling dmu_tx_commit() to avoid
2021 * making a copy of this buffer in dbuf_sync_leaf() due to the
2022 * reference count now being greater than 1.
2024 if (!saved_spill
&& hdl
->sa_spill
) {
2025 if (hdl
->sa_spill_tab
) {
2026 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
2027 hdl
->sa_spill_tab
= NULL
;
2030 dmu_buf_rele(hdl
->sa_spill
, NULL
);
2031 hdl
->sa_spill
= NULL
;
2038 * update or add new attribute
2041 sa_update(sa_handle_t
*hdl
, sa_attr_type_t type
,
2042 void *buf
, uint32_t buflen
, dmu_tx_t
*tx
)
2045 sa_bulk_attr_t bulk
;
2047 VERIFY3U(buflen
, <=, SA_ATTR_MAX_LEN
);
2049 bulk
.sa_attr
= type
;
2050 bulk
.sa_data_func
= NULL
;
2051 bulk
.sa_length
= buflen
;
2054 mutex_enter(&hdl
->sa_lock
);
2055 error
= sa_bulk_update_impl(hdl
, &bulk
, 1, tx
);
2056 mutex_exit(&hdl
->sa_lock
);
2061 * Return size of an attribute
2065 sa_size(sa_handle_t
*hdl
, sa_attr_type_t attr
, int *size
)
2067 sa_bulk_attr_t bulk
;
2070 bulk
.sa_data
= NULL
;
2071 bulk
.sa_attr
= attr
;
2072 bulk
.sa_data_func
= NULL
;
2075 mutex_enter(&hdl
->sa_lock
);
2076 if ((error
= sa_attr_op(hdl
, &bulk
, 1, SA_LOOKUP
, NULL
)) != 0) {
2077 mutex_exit(&hdl
->sa_lock
);
2080 *size
= bulk
.sa_size
;
2082 mutex_exit(&hdl
->sa_lock
);
2087 sa_bulk_lookup_locked(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
)
2090 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
2091 return (sa_lookup_impl(hdl
, attrs
, count
));
2095 sa_bulk_lookup(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
)
2100 mutex_enter(&hdl
->sa_lock
);
2101 error
= sa_bulk_lookup_locked(hdl
, attrs
, count
);
2102 mutex_exit(&hdl
->sa_lock
);
2107 sa_bulk_update(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
, dmu_tx_t
*tx
)
2112 mutex_enter(&hdl
->sa_lock
);
2113 error
= sa_bulk_update_impl(hdl
, attrs
, count
, tx
);
2114 mutex_exit(&hdl
->sa_lock
);
2119 sa_remove(sa_handle_t
*hdl
, sa_attr_type_t attr
, dmu_tx_t
*tx
)
2123 mutex_enter(&hdl
->sa_lock
);
2124 error
= sa_modify_attrs(hdl
, attr
, SA_REMOVE
, NULL
,
2126 mutex_exit(&hdl
->sa_lock
);
2131 sa_object_info(sa_handle_t
*hdl
, dmu_object_info_t
*doi
)
2133 dmu_object_info_from_db(hdl
->sa_bonus
, doi
);
2137 sa_object_size(sa_handle_t
*hdl
, uint32_t *blksize
, u_longlong_t
*nblocks
)
2139 dmu_object_size_from_db(hdl
->sa_bonus
,
2144 sa_set_userp(sa_handle_t
*hdl
, void *ptr
)
2146 hdl
->sa_userp
= ptr
;
2150 sa_get_db(sa_handle_t
*hdl
)
2152 return (hdl
->sa_bonus
);
2156 sa_get_userdata(sa_handle_t
*hdl
)
2158 return (hdl
->sa_userp
);
2162 sa_register_update_callback_locked(objset_t
*os
, sa_update_cb_t
*func
)
2164 ASSERT(MUTEX_HELD(&os
->os_sa
->sa_lock
));
2165 os
->os_sa
->sa_update_cb
= func
;
2169 sa_register_update_callback(objset_t
*os
, sa_update_cb_t
*func
)
2172 mutex_enter(&os
->os_sa
->sa_lock
);
2173 sa_register_update_callback_locked(os
, func
);
2174 mutex_exit(&os
->os_sa
->sa_lock
);
2178 sa_handle_object(sa_handle_t
*hdl
)
2180 return (hdl
->sa_bonus
->db_object
);
2184 sa_enabled(objset_t
*os
)
2186 return (os
->os_sa
== NULL
);
2190 sa_set_sa_object(objset_t
*os
, uint64_t sa_object
)
2192 sa_os_t
*sa
= os
->os_sa
;
2194 if (sa
->sa_master_obj
)
2197 sa
->sa_master_obj
= sa_object
;
2203 sa_hdrsize(void *arg
)
2205 sa_hdr_phys_t
*hdr
= arg
;
2207 return (SA_HDR_SIZE(hdr
));
2211 sa_handle_lock(sa_handle_t
*hdl
)
2214 mutex_enter(&hdl
->sa_lock
);
2218 sa_handle_unlock(sa_handle_t
*hdl
)
2221 mutex_exit(&hdl
->sa_lock
);
2225 EXPORT_SYMBOL(sa_handle_get
);
2226 EXPORT_SYMBOL(sa_handle_get_from_db
);
2227 EXPORT_SYMBOL(sa_handle_destroy
);
2228 EXPORT_SYMBOL(sa_buf_hold
);
2229 EXPORT_SYMBOL(sa_buf_rele
);
2230 EXPORT_SYMBOL(sa_spill_rele
);
2231 EXPORT_SYMBOL(sa_lookup
);
2232 EXPORT_SYMBOL(sa_update
);
2233 EXPORT_SYMBOL(sa_remove
);
2234 EXPORT_SYMBOL(sa_bulk_lookup
);
2235 EXPORT_SYMBOL(sa_bulk_lookup_locked
);
2236 EXPORT_SYMBOL(sa_bulk_update
);
2237 EXPORT_SYMBOL(sa_size
);
2238 EXPORT_SYMBOL(sa_object_info
);
2239 EXPORT_SYMBOL(sa_object_size
);
2240 EXPORT_SYMBOL(sa_get_userdata
);
2241 EXPORT_SYMBOL(sa_set_userp
);
2242 EXPORT_SYMBOL(sa_get_db
);
2243 EXPORT_SYMBOL(sa_handle_object
);
2244 EXPORT_SYMBOL(sa_register_update_callback
);
2245 EXPORT_SYMBOL(sa_setup
);
2246 EXPORT_SYMBOL(sa_replace_all_by_template
);
2247 EXPORT_SYMBOL(sa_replace_all_by_template_locked
);
2248 EXPORT_SYMBOL(sa_enabled
);
2249 EXPORT_SYMBOL(sa_cache_init
);
2250 EXPORT_SYMBOL(sa_cache_fini
);
2251 EXPORT_SYMBOL(sa_set_sa_object
);
2252 EXPORT_SYMBOL(sa_hdrsize
);
2253 EXPORT_SYMBOL(sa_handle_lock
);
2254 EXPORT_SYMBOL(sa_handle_unlock
);
2255 EXPORT_SYMBOL(sa_lookup_uio
);
2256 EXPORT_SYMBOL(sa_add_projid
);
2257 #endif /* _KERNEL */