/* module/zfs/abd.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * ARC buffer data (ABD).
 *
 * ABDs are an abstract data structure for the ARC which can use two
 * different ways of storing the underlying data:
 *
 * (a) Linear buffer. In this case, all the data in the ABD is stored in one
 *     contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
 *
 *         +-------------------+
 *         | ABD (linear)      |
 *         | abd_flags = ...   |
 *         | abd_size = ...    |     +--------------------------------+
 *         | abd_buf ------------------>| raw buffer of size abd_size |
 *         +-------------------+     +--------------------------------+
 *              no abd_chunks
 *
 * (b) Scattered buffer. In this case, the data in the ABD is split into
 *     equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
 *     to the chunks recorded in an array at the end of the ABD structure.
 *
 *         +-------------------+
 *         | ABD (scattered)   |
 *         | abd_flags = ...   |
 *         | abd_size = ...    |
 *         | abd_offset = 0    |                         +-----------+
 *         | abd_chunks[0] ------------------------------>| chunk 0   |
 *         | abd_chunks[1] --------------------+          +-----------+
 *         | ...               |               |          +-----------+
 *         | abd_chunks[N-1] ---------+        +--------->| chunk 1   |
 *         +-------------------+      |                   +-----------+
 *                                    |                       ...
 *                                    |                   +-----------+
 *                                    +------------------>| chunk N-1 |
 *                                                         +-----------+
 *
 * In addition to directly allocating a linear or scattered ABD, it is also
 * possible to create an ABD by requesting the "sub-ABD" starting at an offset
 * within an existing ABD. In linear buffers this is simple (set abd_buf of
 * the new ABD to the starting point within the original raw buffer), but
 * scattered ABDs are a little more complex. The new ABD makes a copy of the
 * relevant abd_chunks pointers (but not the underlying data). However, to
 * provide arbitrary rather than only chunk-aligned starting offsets, it also
 * tracks an abd_offset field which represents the starting point of the data
 * within the first chunk in abd_chunks. For both linear and scattered ABDs,
 * creating an offset ABD marks the original ABD as the offset's parent, and
 * the original ABD's abd_children refcount is incremented. This data allows
 * us to ensure the root ABD isn't deleted before its children.
 *
 * Most consumers should never need to know what type of ABD they're using --
 * the ABD public API ensures that it's possible to transparently switch from
 * using a linear ABD to a scattered one when doing so would be beneficial.
 *
 * If you need to use the data within an ABD directly, and you know it's
 * linear (because you allocated it), you can use abd_to_buf() to access the
 * underlying raw buffer. Otherwise, you should use one of the abd_borrow_buf*
 * functions, which will allocate a raw buffer if necessary. Use the
 * abd_return_buf* functions to return any raw buffers that are no longer
 * necessary when you're done using them.
 *
 * There are a variety of ABD APIs that implement basic buffer operations:
 * compare, copy, read, write, and fill with zeroes. If you need a custom
 * function which progressively accesses the whole ABD, use the abd_iterate_*
 * functions.
 *
 * As an additional feature, linear and scatter ABDs can be stitched together
 * by using the gang ABD type (abd_alloc_gang()). This allows for multiple
 * ABDs to be viewed as a singular ABD.
 *
 * It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled
 * to B_FALSE.
 */
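
/*
 * Usage sketch (illustrative only, not part of the implementation; the 64 KB
 * size and the B_FALSE metadata flag are arbitrary example values). A consumer
 * that does not care about the underlying layout can stay entirely within the
 * public API described above:
 *
 *	abd_t *abd = abd_alloc(65536, B_FALSE);        // linear or scattered
 *	void *buf = abd_borrow_buf_copy(abd, 65536);   // raw view of the data
 *	// ... read or modify buf ...
 *	abd_return_buf_copy(abd, buf, 65536);          // copy changes back
 *	abd_free(abd);                                 // frees data and abd_t
 */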

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>

/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;

void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
	ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
	    ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
	    ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
	    ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
	IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
	IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
	if (abd_is_linear(abd)) {
		ASSERT3U(abd->abd_size, >, 0);
		ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
	} else if (abd_is_gang(abd)) {
		uint_t child_sizes = 0;
		for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
		    cabd != NULL;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT(list_link_active(&cabd->abd_gang_link));
			child_sizes += cabd->abd_size;
			abd_verify(cabd);
		}
		ASSERT3U(abd->abd_size, ==, child_sizes);
	} else {
		ASSERT3U(abd->abd_size, >, 0);
		abd_verify_scatter(abd);
	}
#endif
}

static void
abd_init_struct(abd_t *abd)
{
	list_link_init(&abd->abd_gang_link);
	mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
	abd->abd_flags = 0;
#ifdef ZFS_DEBUG
	zfs_refcount_create(&abd->abd_children);
	abd->abd_parent = NULL;
#endif
	abd->abd_size = 0;
}

static void
abd_fini_struct(abd_t *abd)
{
	mutex_destroy(&abd->abd_mtx);
	ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
	zfs_refcount_destroy(&abd->abd_children);
#endif
}

abd_t *
abd_alloc_struct(size_t size)
{
	abd_t *abd = abd_alloc_struct_impl(size);
	abd_init_struct(abd);
	abd->abd_flags |= ABD_FLAG_ALLOCD;
	return (abd);
}

void
abd_free_struct(abd_t *abd)
{
	abd_fini_struct(abd);
	abd_free_struct_impl(abd);
}

/*
 * Allocate an ABD, along with its own underlying data buffers. Use this if you
 * don't care whether the ABD is linear or not.
 */
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
	if (abd_size_alloc_linear(size))
		return (abd_alloc_linear(size, is_metadata));

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd_t *abd = abd_alloc_struct(size);
	abd->abd_flags |= ABD_FLAG_OWNER;
	abd->abd_u.abd_scatter.abd_offset = 0;
	abd_alloc_chunks(abd, size);

	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;

	abd_update_scatter_stats(abd, ABDSTAT_INCR);

	return (abd);
}

/*
 * Allocate an ABD that must be linear, along with its own underlying data
 * buffer. Only use this when it would be very annoying to write your ABD
 * consumer with a scattered ABD.
 */
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}
	abd->abd_size = size;

	if (is_metadata) {
		ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
	} else {
		ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);

	return (abd);
}

static void
abd_free_linear(abd_t *abd)
{
	if (abd_is_linear_page(abd)) {
		abd_free_linear_page(abd);
		return;
	}
	if (abd->abd_flags & ABD_FLAG_META) {
		zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	} else {
		zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
	}

	abd_update_linear_stats(abd, ABDSTAT_DECR);
}

static void
abd_free_gang(abd_t *abd)
{
	ASSERT(abd_is_gang(abd));
	abd_t *cabd;

	while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
		/*
		 * We must acquire the child ABD's mutex to ensure that if it
		 * is being added to another gang ABD we will set the link
		 * as inactive when removing it from this gang ABD and before
		 * adding it to the other gang ABD.
		 */
		mutex_enter(&cabd->abd_mtx);
		ASSERT(list_link_active(&cabd->abd_gang_link));
		list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
		mutex_exit(&cabd->abd_mtx);
		if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
			abd_free(cabd);
	}
	list_destroy(&ABD_GANG(abd).abd_gang_chain);
}

static void
abd_free_scatter(abd_t *abd)
{
	abd_free_chunks(abd);
	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
 * and abd_get_*(), including abd_get_offset_struct().
 *
 * If the ABD was created with abd_alloc_*(), the underlying data
 * (scatterlist or linear buffer) will also be freed. (Subject to ownership
 * changes via abd_*_ownership_of_buf().)
 *
 * Unless the ABD was created with abd_get_offset_struct(), the abd_t will
 * also be freed.
 */
void
abd_free(abd_t *abd)
{
	if (abd == NULL)
		return;

	abd_verify(abd);
#ifdef ZFS_DEBUG
	IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif

	if (abd_is_gang(abd)) {
		abd_free_gang(abd);
	} else if (abd_is_linear(abd)) {
		if (abd->abd_flags & ABD_FLAG_OWNER)
			abd_free_linear(abd);
	} else {
		if (abd->abd_flags & ABD_FLAG_OWNER)
			abd_free_scatter(abd);
	}

#ifdef ZFS_DEBUG
	if (abd->abd_parent != NULL) {
		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
		    abd->abd_size, abd);
	}
#endif

	abd_fini_struct(abd);
	if (abd->abd_flags & ABD_FLAG_ALLOCD)
		abd_free_struct_impl(abd);
}

/*
 * Allocate an ABD of the same format (same metadata flag, same scatterize
 * setting) as another ABD.
 */
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
	boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
	if (abd_is_linear(sabd) &&
	    !abd_is_linear_page(sabd)) {
		return (abd_alloc_linear(size, is_metadata));
	} else {
		return (abd_alloc(size, is_metadata));
	}
}

/*
 * Create a gang ABD that will be the head of a list of ABDs. This is used
 * to "chain" scatter/gather lists together when constructing aggregated
 * IO's. To free this abd, abd_free() must be called.
 */
abd_t *
abd_alloc_gang(void)
{
	abd_t *abd = abd_alloc_struct(0);
	abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
	list_create(&ABD_GANG(abd).abd_gang_chain,
	    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
	return (abd);
}

/*
 * Add a child gang ABD to a parent gang ABD's chained list.
 */
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	ASSERT(abd_is_gang(cabd));

	if (free_on_free) {
		/*
		 * If the parent is responsible for freeing the child gang
		 * ABD we will just splice the child's children ABD list to
		 * the parent's list and immediately free the child gang ABD
		 * struct. The parent gang ABD's children from the child gang
		 * will retain all the free_on_free settings after being
		 * added to the parent's list.
		 */
#ifdef ZFS_DEBUG
		/*
		 * If cabd has an abd_parent, we have to drop it here. We
		 * can't transfer it to pabd, nor can we clear abd_size while
		 * leaving it in place.
		 */
		if (cabd->abd_parent != NULL) {
			(void) zfs_refcount_remove_many(
			    &cabd->abd_parent->abd_children,
			    cabd->abd_size, cabd);
			cabd->abd_parent = NULL;
		}
#endif
		pabd->abd_size += cabd->abd_size;
		cabd->abd_size = 0;
		list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
		    &ABD_GANG(cabd).abd_gang_chain);
		ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
		abd_verify(pabd);
		abd_free(cabd);
	} else {
		for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
		    child != NULL;
		    child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
			/*
			 * We always pass B_FALSE for free_on_free as it is
			 * the original child gang ABD's responsibility to
			 * determine if any of its child ABDs should be freed
			 * on the call to abd_free().
			 */
			abd_gang_add(pabd, child, B_FALSE);
		}
		abd_verify(pabd);
	}
}

/*
 * Add a child ABD to a gang ABD's chained list.
 */
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
	ASSERT(abd_is_gang(pabd));
	abd_t *child_abd = NULL;

	/*
	 * If the child being added is a gang ABD, we will add the
	 * child's ABDs to the parent gang ABD. This allows us to account
	 * for the offset correctly in the parent gang ABD.
	 */
	if (abd_is_gang(cabd)) {
		ASSERT(!list_link_active(&cabd->abd_gang_link));
		return (abd_gang_add_gang(pabd, cabd, free_on_free));
	}
	ASSERT(!abd_is_gang(cabd));

	/*
	 * In order to verify that an ABD is not already part of
	 * another gang ABD, we must lock the child ABD's abd_mtx
	 * to check its abd_gang_link status. We unlock the abd_mtx
	 * only after it has been added to a gang ABD, which
	 * will update the abd_gang_link's status. See comment below
	 * for how an ABD can be in multiple gang ABDs simultaneously.
	 */
	mutex_enter(&cabd->abd_mtx);
	if (list_link_active(&cabd->abd_gang_link)) {
		/*
		 * If the child ABD is already part of another
		 * gang ABD then we must allocate a new
		 * ABD to use a separate link. We mark the newly
		 * allocated ABD with ABD_FLAG_GANG_FREE, before
		 * adding it to the gang ABD's list, to make the
		 * gang ABD aware that it is responsible to call
		 * abd_free(). We use abd_get_offset() in order
		 * to just allocate a new ABD but avoid copying the
		 * data over into the newly allocated ABD.
		 *
		 * An ABD may become part of multiple gang ABDs. For
		 * example, when writing ditto blocks, the same ABD
		 * is used to write 2 or 3 locations with 2 or 3
		 * zio_t's. Each of the zio's may be aggregated with
		 * different adjacent zio's. zio aggregation uses gang
		 * zio's, so the single ABD can become part of multiple
		 * gang zio's.
		 *
		 * The ASSERT below is to make sure that if
		 * free_on_free is passed as B_TRUE, the ABD can
		 * not be in multiple gang ABDs. The gang ABD
		 * can not be responsible for cleaning up the child
		 * ABD memory allocation if the ABD can be in
		 * multiple gang ABDs at one time.
		 */
		ASSERT3B(free_on_free, ==, B_FALSE);
		child_abd = abd_get_offset(cabd, 0);
		child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	} else {
		child_abd = cabd;
		if (free_on_free)
			child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
	}
	ASSERT3P(child_abd, !=, NULL);

	list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
	mutex_exit(&cabd->abd_mtx);
	pabd->abd_size += child_abd->abd_size;
}
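
/*
 * Usage sketch (illustrative only; a1 and a2 are assumed to be existing ABDs
 * owned by the caller): building an aggregate view of two buffers and then
 * releasing it. With free_on_free == B_FALSE the children survive abd_free()
 * of the gang; pass B_TRUE instead to hand their lifetime to the gang.
 *
 *	abd_t *gang = abd_alloc_gang();
 *	abd_gang_add(gang, a1, B_FALSE);   // gang->abd_size += a1->abd_size
 *	abd_gang_add(gang, a2, B_FALSE);
 *	// ... issue I/O against the combined view ...
 *	abd_free(gang);                    // a1 and a2 remain valid here
 */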

/*
 * Locate the ABD for the supplied offset in the gang ABD.
 * Return a new offset relative to the returned ABD.
 */
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
	abd_t *cabd;

	ASSERT(abd_is_gang(abd));
	ASSERT3U(*off, <, abd->abd_size);
	for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		if (*off >= cabd->abd_size)
			*off -= cabd->abd_size;
		else
			return (cabd);
	}
	VERIFY3P(cabd, !=, NULL);
	return (cabd);
}

/*
 * Allocate a new ABD, using the provided struct (if non-NULL, and if
 * circumstances allow - otherwise allocate the struct). The returned ABD will
 * point to offset off of sabd. It shares the underlying buffer data with sabd.
 * Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
 */
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
	abd_verify(sabd);
	ASSERT3U(off + size, <=, sabd->abd_size);

	if (abd_is_linear(sabd)) {
		if (abd == NULL)
			abd = abd_alloc_struct(0);
		/*
		 * Even if this buf is filesystem metadata, we only track that
		 * if we own the underlying data buffer, which is not true in
		 * this case. Therefore, we don't ever use ABD_FLAG_META here.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;

		ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
	} else if (abd_is_gang(sabd)) {
		size_t left = size;
		if (abd == NULL) {
			abd = abd_alloc_gang();
		} else {
			abd->abd_flags |= ABD_FLAG_GANG;
			list_create(&ABD_GANG(abd).abd_gang_chain,
			    sizeof (abd_t), offsetof(abd_t, abd_gang_link));
		}

		abd->abd_flags &= ~ABD_FLAG_OWNER;
		for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
		    cabd != NULL && left > 0;
		    cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
			int csize = MIN(left, cabd->abd_size - off);

			abd_t *nabd = abd_get_offset_size(cabd, off, csize);
			abd_gang_add(abd, nabd, B_TRUE);
			left -= csize;
			off = 0;
		}
		ASSERT3U(left, ==, 0);
	} else {
		abd = abd_get_offset_scatter(abd, sabd, off, size);
	}

	ASSERT3P(abd, !=, NULL);
	abd->abd_size = size;
#ifdef ZFS_DEBUG
	abd->abd_parent = sabd;
	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
	return (abd);
}

/*
 * Like abd_get_offset_size(), but memory for the abd_t is provided by the
 * caller. Using this routine can improve performance by avoiding the cost
 * of allocating memory for the abd_t struct, and updating the abd stats.
 * Usually, the provided abd is returned, but in some circumstances (FreeBSD,
 * if sabd is scatter and size is more than 2 pages) a new abd_t may need to
 * be allocated. Therefore callers should be careful to use the returned
 * abd_t*.
 */
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
	abd_t *result;
	abd_init_struct(abd);
	result = abd_get_offset_impl(abd, sabd, off, size);
	if (result != abd)
		abd_fini_struct(abd);
	return (result);
}

abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
	size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
	VERIFY3U(size, >, 0);
	return (abd_get_offset_impl(NULL, sabd, off, size));
}

abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
	ASSERT3U(off + size, <=, sabd->abd_size);
	return (abd_get_offset_impl(NULL, sabd, off, size));
}
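
/*
 * Usage sketch (illustrative only; sabd is assumed to be an existing ABD of
 * at least 12 KB): taking a 4 KB window starting 8 KB into the buffer without
 * copying any data. The parent must outlive the derived ABD.
 *
 *	abd_t *win = abd_get_offset_size(sabd, 8192, 4096);
 *	// win shares sabd's data; win->abd_size == 4096
 *	abd_free(win);
 */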

/*
 * Return a scatter ABD of the requested size that contains only zeros.
 */
abd_t *
abd_get_zeros(size_t size)
{
	ASSERT3P(abd_zero_scatter, !=, NULL);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	return (abd_get_offset_size(abd_zero_scatter, 0, size));
}

/*
 * Allocate a linear ABD structure for buf.
 */
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
	abd_t *abd = abd_alloc_struct(0);

	VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);

	/*
	 * Even if this buf is filesystem metadata, we only track that if we
	 * own the underlying data buffer, which is not true in this case.
	 * Therefore, we don't ever use ABD_FLAG_META here.
	 */
	abd->abd_flags |= ABD_FLAG_LINEAR;
	abd->abd_size = size;

	ABD_LINEAR_BUF(abd) = buf;

	return (abd);
}
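
/*
 * Usage sketch (illustrative only; buf and size are assumed to be a
 * caller-owned raw buffer and its length): wrapping an existing buffer so it
 * can be passed to ABD-based interfaces. The wrapper does not take ownership,
 * so the caller still frees buf separately after abd_free().
 *
 *	abd_t *abd = abd_get_from_buf(buf, size);
 *	// ... hand abd to code that expects an abd_t ...
 *	abd_free(abd);    // frees only the abd_t, not buf
 */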

/*
 * Get the raw buffer associated with a linear ABD.
 */
void *
abd_to_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	abd_verify(abd);
	return (ABD_LINEAR_BUF(abd));
}

/*
 * Borrow a raw buffer from an ABD without copying the contents of the ABD
 * into the buffer. If the ABD is scattered, this will allocate a raw buffer
 * whose contents are undefined. To copy over the existing data in the ABD, use
 * abd_borrow_buf_copy() instead.
 */
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
	void *buf;
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
	if (abd_is_linear(abd)) {
		buf = abd_to_buf(abd);
	} else {
		buf = zio_buf_alloc(n);
	}
#ifdef ZFS_DEBUG
	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
	return (buf);
}

void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf(abd, n);
	if (!abd_is_linear(abd)) {
		abd_copy_to_buf(buf, abd, n);
	}
	return (buf);
}

/*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
 * not change the contents of the ABD and will ASSERT that you didn't modify
 * the buffer since it was borrowed. If you want any changes you made to buf to
 * be copied back to abd, use abd_return_buf_copy() instead.
 */
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
	abd_verify(abd);
	ASSERT3U(abd->abd_size, >=, n);
#ifdef ZFS_DEBUG
	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
	if (abd_is_linear(abd)) {
		ASSERT3P(buf, ==, abd_to_buf(abd));
	} else {
		ASSERT0(abd_cmp_buf(abd, buf, n));
		zio_buf_free(buf, n);
	}
}

void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
	if (!abd_is_linear(abd)) {
		abd_copy_from_buf(abd, buf, n);
	}
	abd_return_buf(abd, buf, n);
}
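
/*
 * Usage sketch (illustrative only): modifying the first 512 bytes of an ABD
 * through a borrowed raw buffer. abd_borrow_buf_copy() fills the buffer with
 * the current contents; abd_return_buf_copy() writes the modifications back.
 * For linear ABDs both calls operate directly on the underlying buffer.
 *
 *	void *buf = abd_borrow_buf_copy(abd, 512);
 *	memset(buf, 0xff, 512);
 *	abd_return_buf_copy(abd, buf, 512);
 */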

void
abd_release_ownership_of_buf(abd_t *abd)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(abd->abd_flags & ABD_FLAG_OWNER);

	/*
	 * abd_free() needs to handle LINEAR_PAGE ABD's specially.
	 * Since that flag does not survive the
	 * abd_release_ownership_of_buf() -> abd_get_from_buf() ->
	 * abd_take_ownership_of_buf() sequence, we don't allow releasing
	 * these "linear but not zio_[data_]buf_alloc()'ed" ABD's.
	 */
	ASSERT(!abd_is_linear_page(abd));

	abd_verify(abd);

	abd->abd_flags &= ~ABD_FLAG_OWNER;
	/* Disable this flag since we no longer own the data buffer */
	abd->abd_flags &= ~ABD_FLAG_META;

	abd_update_linear_stats(abd, ABDSTAT_DECR);
}

/*
 * Give this ABD ownership of the buffer that it's storing. Can only be used on
 * linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
 * with abd_alloc_linear() which subsequently released ownership of their buf
 * with abd_release_ownership_of_buf().
 */
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
	ASSERT(abd_is_linear(abd));
	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
	abd_verify(abd);

	abd->abd_flags |= ABD_FLAG_OWNER;
	if (is_metadata) {
		abd->abd_flags |= ABD_FLAG_META;
	}

	abd_update_linear_stats(abd, ABDSTAT_INCR);
}
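
/*
 * Usage sketch (illustrative only): handing a linear ABD's data buffer to
 * code that frees raw buffers itself. After releasing ownership, abd_free()
 * no longer frees the data, so the caller must free it the same way it was
 * allocated (zio_data_buf_alloc() here, since is_metadata was B_FALSE).
 *
 *	abd_t *abd = abd_alloc_linear(size, B_FALSE);
 *	void *buf = abd_to_buf(abd);
 *	abd_release_ownership_of_buf(abd);
 *	abd_free(abd);                     // frees only the abd_t now
 *	// ... later ...
 *	zio_data_buf_free(buf, size);
 */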

/*
 * Initializes an abd_iter based on whether the abd is a gang ABD
 * or just a single ABD.
 */
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
	abd_t *cabd = NULL;

	if (abd_is_gang(abd)) {
		cabd = abd_gang_get_offset(abd, &off);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, off);
		}
	} else {
		abd_iter_init(aiter, abd);
		abd_iter_advance(aiter, off);
	}
	return (cabd);
}

/*
 * Advances an abd_iter. We have to be careful with gang ABDs as
 * advancing could mean that we are at the end of a particular ABD and
 * must grab the next ABD in the gang ABD's list.
 */
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
    size_t len)
{
	abd_iter_advance(aiter, len);
	if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
		ASSERT3P(cabd, !=, NULL);
		cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
		if (cabd) {
			abd_iter_init(aiter, cabd);
			abd_iter_advance(aiter, 0);
		}
	}
	return (cabd);
}

int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
    abd_iter_func_t *func, void *private)
{
	struct abd_iter aiter;
	int ret = 0;

	if (size == 0)
		return (0);

	abd_verify(abd);
	ASSERT3U(off + size, <=, abd->abd_size);

	boolean_t gang = abd_is_gang(abd);
	abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);

	while (size > 0) {
		/* If we are at the end of the gang ABD we are done */
		if (gang && !c_abd)
			break;

		abd_iter_map(&aiter);

		size_t len = MIN(aiter.iter_mapsize, size);
		ASSERT3U(len, >, 0);

		ret = func(aiter.iter_mapaddr, len, private);

		abd_iter_unmap(&aiter);

		if (ret != 0)
			break;

		size -= len;
		c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
	}

	return (ret);
}
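
/*
 * Usage sketch (illustrative only; count_nonzero_cb is a hypothetical helper,
 * not part of this file): counting the non-zero bytes in an ABD with a custom
 * abd_iterate_func() callback. The callback sees each mapped segment as a raw
 * buffer; returning non-zero would stop the iteration early.
 *
 *	static int
 *	count_nonzero_cb(void *buf, size_t size, void *private)
 *	{
 *		uint64_t *count = private;
 *		const uint8_t *p = buf;
 *
 *		for (size_t i = 0; i < size; i++)
 *			*count += (p[i] != 0);
 *		return (0);
 *	}
 *
 *	uint64_t nz = 0;
 *	(void) abd_iterate_func(abd, 0, abd->abd_size, count_nonzero_cb, &nz);
 */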

struct buf_arg {
	void *arg_buf;
};

static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(ba_ptr->arg_buf, buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy abd to buf. (off is the offset in abd.)
 */
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
	    &ba_ptr);
}

static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
	int ret;
	struct buf_arg *ba_ptr = private;

	ret = memcmp(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (ret);
}

/*
 * Compare the contents of abd to buf. (off is the offset in abd.)
 */
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}

static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
	struct buf_arg *ba_ptr = private;

	(void) memcpy(buf, ba_ptr->arg_buf, size);
	ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;

	return (0);
}

/*
 * Copy from buf to abd. (off is the offset in abd.)
 */
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
	struct buf_arg ba_ptr = { (void *) buf };

	(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
	    &ba_ptr);
}

static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
	(void) private;
	(void) memset(buf, 0, size);
	return (0);
}

/*
 * Zero out the abd from a particular offset to the end.
 */
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
	(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}

/*
 * Iterate over two ABDs and call func incrementally on the two ABDs' data in
 * equal-sized chunks (passed to func as raw buffers). func could be called
 * many times during this iteration.
 */
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
    size_t size, abd_iter_func2_t *func, void *private)
{
	int ret = 0;
	struct abd_iter daiter, saiter;
	boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
	abd_t *c_dabd, *c_sabd;

	if (size == 0)
		return (0);

	abd_verify(dabd);
	abd_verify(sabd);

	ASSERT3U(doff + size, <=, dabd->abd_size);
	ASSERT3U(soff + size, <=, sabd->abd_size);

	dabd_is_gang_abd = abd_is_gang(dabd);
	sabd_is_gang_abd = abd_is_gang(sabd);
	c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
	c_sabd = abd_init_abd_iter(sabd, &saiter, soff);

	while (size > 0) {
		/* if we are at the end of the gang ABD we are done */
		if ((dabd_is_gang_abd && !c_dabd) ||
		    (sabd_is_gang_abd && !c_sabd))
			break;

		abd_iter_map(&daiter);
		abd_iter_map(&saiter);

		size_t dlen = MIN(daiter.iter_mapsize, size);
		size_t slen = MIN(saiter.iter_mapsize, size);
		size_t len = MIN(dlen, slen);
		ASSERT(dlen > 0 || slen > 0);

		ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
		    private);

		abd_iter_unmap(&saiter);
		abd_iter_unmap(&daiter);

		if (ret != 0)
			break;

		size -= len;
		c_dabd =
		    abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
		c_sabd =
		    abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
	}

	return (ret);
}

static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	(void) private;
	(void) memcpy(dbuf, sbuf, size);
	return (0);
}

/*
 * Copy from sabd to dabd starting from soff and doff.
 */
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
	(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
	    abd_copy_off_cb, NULL);
}

static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
	(void) private;
	return (memcmp(bufa, bufb, size));
}

/*
 * Compares the contents of two ABDs.
 */
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
	ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
	return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
	    abd_cmp_cb, NULL));
}

/*
 * Iterate over code ABDs and a data ABD and call @func_raidz_gen.
 *
 * @cabds           parity ABDs, must have equal size
 * @dabd            data ABD. Can be NULL (in this case @dsize = 0)
 * @func_raidz_gen  should be implemented so that its behaviour
 *                  is the same when taking linear and when taking scatter
 */
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
    ssize_t csize, ssize_t dsize, const unsigned parity,
    void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
	int i;
	ssize_t len, dlen;
	struct abd_iter caiters[3];
	struct abd_iter daiter = {0};
	void *caddrs[3];
	unsigned long flags __maybe_unused = 0;
	abd_t *c_cabds[3];
	abd_t *c_dabd = NULL;
	boolean_t cabds_is_gang_abd[3];
	boolean_t dabd_is_gang_abd = B_FALSE;

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
		c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
	}

	if (dabd) {
		dabd_is_gang_abd = abd_is_gang(dabd);
		c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
	}

	ASSERT3S(dsize, >=, 0);

	abd_enter_critical(flags);
	while (csize > 0) {
		/* if we are at the end of the gang ABD we are done */
		if (dabd_is_gang_abd && !c_dabd)
			break;

		for (i = 0; i < parity; i++) {
			/*
			 * If we are at the end of the gang ABD we are
			 * done.
			 */
			if (cabds_is_gang_abd[i] && !c_cabds[i])
				break;
			abd_iter_map(&caiters[i]);
			caddrs[i] = caiters[i].iter_mapaddr;
		}

		len = csize;

		if (dabd && dsize > 0)
			abd_iter_map(&daiter);

		switch (parity) {
		case 3:
			len = MIN(caiters[2].iter_mapsize, len);
			zfs_fallthrough;
		case 2:
			len = MIN(caiters[1].iter_mapsize, len);
			zfs_fallthrough;
		case 1:
			len = MIN(caiters[0].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);

		if (dabd && dsize > 0) {
			/* this needs precise iter.length */
			len = MIN(daiter.iter_mapsize, len);
			dlen = len;
		} else
			dlen = 0;

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&caiters[i]);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &caiters[i], len);
		}

		if (dabd && dsize > 0) {
			abd_iter_unmap(&daiter);
			c_dabd =
			    abd_advance_abd_iter(dabd, c_dabd, &daiter,
			    dlen);
			dsize -= dlen;
		}

		csize -= len;

		ASSERT3S(dsize, >=, 0);
		ASSERT3S(csize, >=, 0);
	}
	abd_exit_critical(flags);
}

/*
 * Iterate over code ABDs and data reconstruction target ABDs and call
 * @func_raidz_rec. Function maps at most 6 pages atomically.
 *
 * @cabds           parity ABDs, must have equal size
 * @tabds           rec target ABDs, at most 3
 * @tsize           size of data target columns
 * @func_raidz_rec  expects syndrome data in target columns. Function
 *                  reconstructs data and overwrites target columns.
 */
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
    ssize_t tsize, const unsigned parity,
    void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
    const unsigned *mul),
    const unsigned *mul)
{
	int i;
	ssize_t len;
	struct abd_iter citers[3];
	struct abd_iter xiters[3];
	void *caddrs[3], *xaddrs[3];
	unsigned long flags __maybe_unused = 0;
	boolean_t cabds_is_gang_abd[3];
	boolean_t tabds_is_gang_abd[3];
	abd_t *c_cabds[3];
	abd_t *c_tabds[3];

	ASSERT3U(parity, <=, 3);

	for (i = 0; i < parity; i++) {
		cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
		tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
		c_cabds[i] =
		    abd_init_abd_iter(cabds[i], &citers[i], 0);
		c_tabds[i] =
		    abd_init_abd_iter(tabds[i], &xiters[i], 0);
	}

	abd_enter_critical(flags);
	while (tsize > 0) {

		for (i = 0; i < parity; i++) {
			/*
			 * If we are at the end of the gang ABD we
			 * are done.
			 */
			if (cabds_is_gang_abd[i] && !c_cabds[i])
				break;
			if (tabds_is_gang_abd[i] && !c_tabds[i])
				break;
			abd_iter_map(&citers[i]);
			abd_iter_map(&xiters[i]);
			caddrs[i] = citers[i].iter_mapaddr;
			xaddrs[i] = xiters[i].iter_mapaddr;
		}

		len = tsize;
		switch (parity) {
		case 3:
			len = MIN(xiters[2].iter_mapsize, len);
			len = MIN(citers[2].iter_mapsize, len);
			zfs_fallthrough;
		case 2:
			len = MIN(xiters[1].iter_mapsize, len);
			len = MIN(citers[1].iter_mapsize, len);
			zfs_fallthrough;
		case 1:
			len = MIN(xiters[0].iter_mapsize, len);
			len = MIN(citers[0].iter_mapsize, len);
		}

		/* must be progressive */
		ASSERT3S(len, >, 0);
		/*
		 * The iterated function likely will not do well if each
		 * segment except the last one is not a multiple of 512
		 * (raidz).
		 */
		ASSERT3U(((uint64_t)len & 511ULL), ==, 0);

		func_raidz_rec(xaddrs, len, caddrs, mul);

		for (i = parity-1; i >= 0; i--) {
			abd_iter_unmap(&xiters[i]);
			abd_iter_unmap(&citers[i]);
			c_tabds[i] =
			    abd_advance_abd_iter(tabds[i], c_tabds[i],
			    &xiters[i], len);
			c_cabds[i] =
			    abd_advance_abd_iter(cabds[i], c_cabds[i],
			    &citers[i], len);
		}

		tsize -= len;
		ASSERT3S(tsize, >=, 0);
	}
	abd_exit_critical(flags);
}