 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */
/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space.  See abd_alloc_chunks() for details.
 */
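/*
 * Reading aid (an illustrative sketch, not an exhaustive call graph): the
 * common code in abd.c decides between linear and scatter and then calls the
 * per-OS hooks implemented in this file, roughly as follows:
 *
 *	abd_t *abd = abd_alloc(size, is_metadata);
 *		-> abd_size_alloc_linear() picks scatter for large sizes
 *		-> abd_alloc_struct() -> abd_alloc_struct_impl()
 *		-> abd_alloc_chunks() fills in the scatterlist
 *	...
 *	abd_free(abd);
 *		-> abd_free_chunks() releases the pages
 *		-> abd_free_struct_impl() returns the abd_t to its cache
 */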
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;
static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt",				KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt",			KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size",			KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste",		KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order. These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N",			KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks; a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk",		KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry",		KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry",		KSTAT_DATA_UINT64 },
};
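/*
 * Note for readers: these counters are exported through the "abdstats" kstat
 * registered in abd_init() below.  On a typical Linux system with the SPL
 * kstat support they can be inspected at /proc/spl/kstat/zfs/abdstats (the
 * exact path depends on how the kstat layer is configured).
 */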
struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)
/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's.  Smaller allocations will use linear ABD's, which use
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
static int zfs_abd_scatter_min_size = 512 * 3;
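/*
 * Worked example of the trade-off above (a sketch, assuming 4 KiB pages and
 * 16-buffer slabs): a lone live 512 B buffer held as scatter pins one page
 * and wastes 3584/4096 = 7/8 of it, while the same buffer held as the only
 * live object on a 512 B kmem slab pins the whole slab and wastes 15/16.
 * The 512 * 3 = 1536 byte default above therefore keeps 512 B and 1 KiB
 * buffers linear and switches to scatter at 1.5 KiB and larger.
 */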
/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page.  This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

/*
 * _KERNEL   - Will point to ZERO_PAGE if it is available or it will be
 *             an allocated zero'd PAGESIZE buffer.
 * Userspace - Will be an allocated zero'ed PAGESIZE buffer.
 *
 * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;

static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}
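/*
 * Worked example (assuming PAGESIZE == 4096): a 6 KiB request rounds up to
 * two pages, while any sub-page request still consumes a full page:
 *
 *	abd_chunkcnt_for_bytes(6144) == 2
 *	abd_chunkcnt_for_bytes(512)  == 1
 */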
abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}
#ifdef _KERNEL
static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
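/*
 * Illustrative note (assuming a common x86_64 build where MAX_ORDER is 11):
 * the largest compound chunk attempted is then order 10, i.e. 1024
 * contiguous 4 KiB pages (4 MiB).  Lowering the zfs_abd_scatter_max_order
 * tunable caps this at a smaller order.
 */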
/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif
/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone.  Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction.  When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
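/*
 * Worked example of the sizing loop below (a sketch, assuming 4 KiB pages):
 * a 36 KiB ABD needs nr_pages == 9.  The first pass tries
 * order = MIN(highbit64(9) - 1, max_order) == 3, i.e. one 8-page (32 KiB)
 * compound chunk, and the second pass covers the remaining page with an
 * order-0 allocation.  If a compound allocation fails, max_order is lowered
 * and the loop retries with smaller chunks, eventually falling back to
 * single pages that may block on reclaim.
 */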
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);

		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer.  All single-page (4K) ABD's can be
		 * represented this way.  Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages).  This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads of small blocks which can take advantage
		 * of this.
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization can not be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}
#else

/*
 * Allocate N individual pages to construct a scatter ABD.  This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces.  It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}
/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);
#else
	abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}
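/*
 * Accounting note (an explanatory aside): although abd_zero_scatter
 * advertises SPA_MAXBLOCKSIZE bytes, every scatterlist entry above points at
 * the same single zero page, so only PAGESIZE bytes of physical memory are
 * charged to scatter_data_size.
 */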
#else /* _KERNEL */

#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)

#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i = 0;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		for (int j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			umem_free(p, PAGESIZE);
		}
	}
	abd_free_sg_table(abd);
}
static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i = 0;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	zfs_refcount_create(&abd_zero_scatter->abd_children);
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */

boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}
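/*
 * Example of the waste calculation above (assuming 4 KiB pages): a 5 KiB
 * scatter ABD occupies two pages, so waste = P2ROUNDUP(5120, 4096) - 5120 =
 * 8192 - 5120 = 3072 bytes, which is charged to (and later returned from)
 * ARC_SPACE_ABD_CHUNK_WASTE.
 */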
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);

#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
	umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}
void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, then we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages, thereby eliminating
 * the copy.  But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}
/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));

	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;

	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already been
 * exhausted, in which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already been exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}
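/*
 * Hedged usage sketch (illustrative only; the real consumers live in the
 * common abd.c iterate routines): a caller walks an ABD by pairing the map
 * and unmap helpers around each chunk, e.g.
 *
 *	struct abd_iter aiter;
 *	abd_iter_init(&aiter, abd);
 *	while (!abd_iter_at_end(&aiter)) {
 *		abd_iter_map(&aiter);
 *		// operate on aiter.iter_mapaddr / aiter.iter_mapsize
 *		size_t len = aiter.iter_mapsize;
 *		abd_iter_unmap(&aiter);
 *		abd_iter_advance(&aiter, len);
 *	}
 *
 * Note that the chunk must be unmapped before advancing, which is exactly
 * what the ASSERTs in abd_iter_advance() enforce.
 */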
void
abd_cache_reap_now(void)
{
}
#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}
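/*
 * Worked example of the final calculation above (assuming 4 KiB pages): an
 * 8 KiB I/O whose data starts 1 KiB into a page gives pos = 1024 and
 * size = 8192, so ((1024 + 8192 + 4095) >> 12) - (1024 >> 12) = 3 - 0 = 3
 * pages, since the unaligned start and end each touch a partial page.
 */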
static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well when given a 0-count page; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}
/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}
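/*
 * Hedged usage sketch (the caller shown here is illustrative and not part of
 * this file): a block-device vdev would typically size the bio with
 * abd_nr_pages_off() and then attach the ABD's pages with abd_bio_map_off():
 *
 *	unsigned nr = abd_nr_pages_off(abd, io_size, io_offset);
 *	struct bio *bio = ...;	allocated with room for nr bio_vecs; the
 *				exact bio_alloc() signature varies by kernel
 *	unsigned residual = abd_bio_map_off(bio, abd, io_size, io_offset);
 *
 * A non-zero residual means the bio ran out of vecs, and the caller must
 * submit what was mapped and build another bio for the remainder.
 */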
/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif /* _KERNEL */