/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 by Delphix. All rights reserved.
 */

/*
 * See abd.c for a general overview of the arc buffered data (ABD).
 *
 * Linear buffers act exactly like normal buffers and are always mapped into the
 * kernel's virtual memory space, while scattered ABD data chunks are allocated
 * as physical pages and then mapped in only while they are actually being
 * accessed through one of the abd_* library functions. Using scattered ABDs
 * provides several benefits:
 *
 * (1) They avoid use of kmem_*, preventing performance problems where running
 *     kmem_reap on very large memory systems never finishes and causes
 *     constant TLB shootdowns.
 *
 * (2) Fragmentation is less of an issue since when we are at the limit of
 *     allocatable space, we won't have to search around for a long free
 *     hole in the VA space for large ARC allocations. Each chunk is mapped in
 *     individually, so even if we are using HIGHMEM (see next point) we
 *     wouldn't need to worry about finding a contiguous address range.
 *
 * (3) If we are not using HIGHMEM, then all physical memory is always
 *     mapped into the kernel's address space, so we also avoid the map /
 *     unmap costs on each ABD access.
 *
 * If we are not using HIGHMEM, scattered buffers which have only one chunk
 * can be treated as linear buffers, because they are contiguous in the
 * kernel's virtual address space.  See abd_alloc_chunks() for details.
 */

#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#ifdef _KERNEL
#include <linux/kmap_compat.h>
#include <linux/scatterlist.h>
#else
#define	MAX_ORDER	1
#endif

typedef struct abd_stats {
	kstat_named_t abdstat_struct_size;
	kstat_named_t abdstat_linear_cnt;
	kstat_named_t abdstat_linear_data_size;
	kstat_named_t abdstat_scatter_cnt;
	kstat_named_t abdstat_scatter_data_size;
	kstat_named_t abdstat_scatter_chunk_waste;
	kstat_named_t abdstat_scatter_orders[MAX_ORDER];
	kstat_named_t abdstat_scatter_page_multi_chunk;
	kstat_named_t abdstat_scatter_page_multi_zone;
	kstat_named_t abdstat_scatter_page_alloc_retry;
	kstat_named_t abdstat_scatter_sg_table_retry;
} abd_stats_t;

static abd_stats_t abd_stats = {
	/* Amount of memory occupied by all of the abd_t struct allocations */
	{ "struct_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of linear ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset() and abd_get_from_buf()). If an
	 * ABD takes ownership of its buf then it will become tracked.
	 */
	{ "linear_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all linear ABDs tracked by linear_cnt */
	{ "linear_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are currently allocated, excluding
	 * ABDs which don't own their data (for instance the ones which were
	 * allocated through abd_get_offset()).
	 */
	{ "scatter_cnt", KSTAT_DATA_UINT64 },
	/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
	{ "scatter_data_size", KSTAT_DATA_UINT64 },
	/*
	 * The amount of space wasted at the end of the last chunk across all
	 * scatter ABDs tracked by scatter_cnt.
	 */
	{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
	/*
	 * The number of compound allocations of a given order.  These
	 * allocations are spread over all currently allocated ABDs, and
	 * act as a measure of memory fragmentation.
	 */
	{ { "scatter_order_N", KSTAT_DATA_UINT64 } },
	/*
	 * The number of scatter ABDs which contain multiple chunks.
	 * ABDs are preferentially allocated from the minimum number of
	 * contiguous multi-page chunks, a single chunk is optimal.
	 */
	{ "scatter_page_multi_chunk", KSTAT_DATA_UINT64 },
	/*
	 * The number of scatter ABDs which are split across memory zones.
	 * ABDs are preferentially allocated using pages from a single zone.
	 */
	{ "scatter_page_multi_zone", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the pages to populate the scatter ABD.
	 */
	{ "scatter_page_alloc_retry", KSTAT_DATA_UINT64 },
	/*
	 * The total number of retries encountered when attempting to
	 * allocate the sg table for an ABD.
	 */
	{ "scatter_sg_table_retry", KSTAT_DATA_UINT64 },
};

struct {
	wmsum_t abdstat_struct_size;
	wmsum_t abdstat_linear_cnt;
	wmsum_t abdstat_linear_data_size;
	wmsum_t abdstat_scatter_cnt;
	wmsum_t abdstat_scatter_data_size;
	wmsum_t abdstat_scatter_chunk_waste;
	wmsum_t abdstat_scatter_orders[MAX_ORDER];
	wmsum_t abdstat_scatter_page_multi_chunk;
	wmsum_t abdstat_scatter_page_multi_zone;
	wmsum_t abdstat_scatter_page_alloc_retry;
	wmsum_t abdstat_scatter_sg_table_retry;
} abd_sums;

#define	abd_for_each_sg(abd, sg, n, i)	\
	for_each_sg(ABD_SCATTER(abd).abd_sgl, sg, n, i)

/*
 * zfs_abd_scatter_min_size is the minimum allocation size to use scatter
 * ABD's.  Smaller allocations will use linear ABD's which uses
 * zio_[data_]buf_alloc().
 *
 * Scatter ABD's use at least one page each, so sub-page allocations waste
 * some space when allocated as scatter (e.g. 2KB scatter allocation wastes
 * half of each page).  Using linear ABD's for small allocations means that
 * they will be put on slabs which contain many allocations.  This can
 * improve memory efficiency, but it also makes it much harder for ARC
 * evictions to actually free pages, because all the buffers on one slab need
 * to be freed in order for the slab (and underlying pages) to be freed.
 * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's
 * possible for them to actually waste more memory than scatter (one page per
 * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th).
 *
 * Spill blocks are typically 512B and are heavily used on systems running
 * selinux with the default dnode size and the `xattr=sa` property set.
 *
 * By default we use linear allocations for 512B and 1KB, and scatter
 * allocations for larger (1.5KB and up).
 */
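/*
 * For example, with 4K pages a 2KB scatter allocation occupies a full page
 * and wastes the other half, whereas a 512B or 1KB linear allocation shares
 * a kmem slab with many other buffers.  With the default threshold of
 * 512 * 3 = 1536 bytes, 512B and 1KB buffers stay linear and anything
 * 1.5KB or larger is allocated as scatter.
 */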
static int zfs_abd_scatter_min_size = 512 * 3;

/*
 * We use a scattered SPA_MAXBLOCKSIZE sized ABD whose pages are
 * just a single zero'd page. This allows us to conserve memory by
 * only using a single zero page for the scatterlist.
 */
abd_t *abd_zero_scatter = NULL;

struct page;
/*
 * _KERNEL   - Will point to ZERO_PAGE if it is available or it will be
 *             an allocated zero'd PAGESIZE buffer.
 * Userspace - Will be an allocated zero'ed PAGESIZE buffer.
 *
 * abd_zero_page is assigned to each of the pages of abd_zero_scatter.
 */
static struct page *abd_zero_page = NULL;

static kmem_cache_t *abd_cache = NULL;
static kstat_t *abd_ksp;
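
/*
 * Number of PAGESIZE chunks needed to hold "size" bytes, e.g. with 4K pages
 * a 6000-byte request rounds up to 8192 bytes and therefore needs two chunks.
 */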
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
	return (P2ROUNDUP(size, PAGESIZE) / PAGESIZE);
}

abd_t *
abd_alloc_struct_impl(size_t size)
{
	/*
	 * In Linux we do not use the size passed in during ABD
	 * allocation, so we just ignore it.
	 */
	(void) size;
	abd_t *abd = kmem_cache_alloc(abd_cache, KM_PUSHPAGE);
	ASSERT3P(abd, !=, NULL);
	ABDSTAT_INCR(abdstat_struct_size, sizeof (abd_t));

	return (abd);
}

void
abd_free_struct_impl(abd_t *abd)
{
	kmem_cache_free(abd_cache, abd);
	ABDSTAT_INCR(abdstat_struct_size, -(int)sizeof (abd_t));
}

#ifdef _KERNEL
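/*
 * Highest compound page order abd_alloc_chunks() will request; tunable at
 * runtime through the zfs_abd_scatter_max_order module parameter declared
 * at the bottom of this file.
 */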
static unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;

/*
 * Mark zfs data pages so they can be excluded from kernel crash dumps
 */
#ifdef _LP64
#define	ABD_FILE_CACHE_PAGE	0x2F5ABDF11ECAC4E

static inline void
abd_mark_zfs_page(struct page *page)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, ABD_FILE_CACHE_PAGE);
}

static inline void
abd_unmark_zfs_page(struct page *page)
{
	set_page_private(page, 0UL);
	ClearPagePrivate(page);
	put_page(page);
}
#else
#define	abd_mark_zfs_page(page)
#define	abd_unmark_zfs_page(page)
#endif /* _LP64 */

#ifndef CONFIG_HIGHMEM

#ifndef __GFP_RECLAIM
#define	__GFP_RECLAIM		__GFP_WAIT
#endif

/*
 * The goal is to minimize fragmentation by preferentially populating ABDs
 * with higher order compound pages from a single zone.  Allocation size is
 * progressively decreased until it can be satisfied without performing
 * reclaim or compaction.  When necessary this function will degenerate to
 * allocating individual pages and allowing reclaim to satisfy allocations.
 */
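/*
 * For example, with 4K pages a 128K (32 page) request is first attempted
 * as a single order-5 compound allocation; each time an allocation fails
 * the maximum order is lowered, so the request degrades toward a series of
 * smaller compound pages and, in the worst case, 32 individual order-0
 * pages obtained with reclaim allowed.
 */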
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct list_head pages;
	struct sg_table table;
	struct scatterlist *sg;
	struct page *page, *tmp_page = NULL;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	gfp_t gfp_comp = (gfp | __GFP_NORETRY | __GFP_COMP) & ~__GFP_RECLAIM;
	int max_order = MIN(zfs_abd_scatter_max_order, MAX_ORDER - 1);
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int chunks = 0, zones = 0;
	size_t remaining_size;
	int nid = NUMA_NO_NODE;
	int alloc_pages = 0;

	INIT_LIST_HEAD(&pages);

	while (alloc_pages < nr_pages) {
		unsigned chunk_pages;
		int order;

		order = MIN(highbit64(nr_pages - alloc_pages) - 1, max_order);
		chunk_pages = (1U << order);

		page = alloc_pages_node(nid, order ? gfp_comp : gfp, order);
		if (page == NULL) {
			if (order == 0) {
				ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
				schedule_timeout_interruptible(1);
			} else {
				max_order = MAX(0, order - 1);
			}
			continue;
		}

		list_add_tail(&page->lru, &pages);

		if ((nid != NUMA_NO_NODE) && (page_to_nid(page) != nid))
			zones++;

		nid = page_to_nid(page);
		ABDSTAT_BUMP(abdstat_scatter_orders[order]);
		chunks++;
		alloc_pages += chunk_pages;
	}

	ASSERT3S(alloc_pages, ==, nr_pages);

	while (sg_alloc_table(&table, chunks, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	sg = table.sgl;
	remaining_size = size;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		size_t sg_size = MIN(PAGESIZE << compound_order(page),
		    remaining_size);
		sg_set_page(sg, page, sg_size, 0);
		abd_mark_zfs_page(page);
		remaining_size -= sg_size;

		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/*
	 * These conditions ensure that a possible transformation to a linear
	 * ABD would be valid.
	 */
	ASSERT(!PageHighMem(sg_page(table.sgl)));
	ASSERT0(ABD_SCATTER(abd).abd_offset);

	if (table.nents == 1) {
		/*
		 * Since there is only one entry, this ABD can be represented
		 * as a linear buffer.  All single-page (4K) ABD's can be
		 * represented this way.  Some multi-page ABD's can also be
		 * represented this way, if we were able to allocate a single
		 * "chunk" (higher-order "page" which represents a power-of-2
		 * series of physically-contiguous pages).  This is often the
		 * case for 2-page (8K) ABD's.
		 *
		 * Representing a single-entry scatter ABD as a linear ABD
		 * has the performance advantage of avoiding the copy (and
		 * allocation) in abd_borrow_buf_copy / abd_return_buf_copy.
		 * A performance increase of around 5% has been observed for
		 * ARC-cached reads (of small blocks which can take advantage
		 * of this).
		 *
		 * Note that this optimization is only possible because the
		 * pages are always mapped into the kernel's address space.
		 * This is not the case for highmem pages, so the
		 * optimization can not be made there.
		 */
		abd->abd_flags |= ABD_FLAG_LINEAR;
		abd->abd_flags |= ABD_FLAG_LINEAR_PAGE;
		abd->abd_u.abd_linear.abd_sgl = table.sgl;
		ABD_LINEAR_BUF(abd) = page_address(sg_page(table.sgl));
	} else if (table.nents > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;

		if (zones) {
			ABDSTAT_BUMP(abdstat_scatter_page_multi_zone);
			abd->abd_flags |= ABD_FLAG_MULTI_ZONE;
		}

		ABD_SCATTER(abd).abd_sgl = table.sgl;
		ABD_SCATTER(abd).abd_nents = table.nents;
	}
}

#else

/*
 * Allocate N individual pages to construct a scatter ABD.  This function
 * makes no attempt to request contiguous pages and requires the minimal
 * number of kernel interfaces.  It's designed for maximum compatibility.
 */
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	struct page *page;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(size);
	int i = 0;

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}

	ASSERT3U(table.nents, ==, nr_pages);
	ABD_SCATTER(abd).abd_sgl = table.sgl;
	ABD_SCATTER(abd).abd_nents = nr_pages;

	abd_for_each_sg(abd, sg, nr_pages, i) {
		while ((page = __page_cache_alloc(gfp)) == NULL) {
			ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
			schedule_timeout_interruptible(1);
		}

		ABDSTAT_BUMP(abdstat_scatter_orders[0]);
		sg_set_page(sg, page, PAGESIZE, 0);
		abd_mark_zfs_page(page);
	}

	if (nr_pages > 1) {
		ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
		abd->abd_flags |= ABD_FLAG_MULTI_CHUNK;
	}
}
#endif /* !CONFIG_HIGHMEM */

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	struct sg_table table;

	table.sgl = ABD_SCATTER(abd).abd_sgl;
	table.nents = table.orig_nents = ABD_SCATTER(abd).abd_nents;
	sg_free_table(&table);
}

void
abd_free_chunks(abd_t *abd)
{
	struct scatterlist *sg = NULL;
	struct page *page;
	int nr_pages = ABD_SCATTER(abd).abd_nents;
	int order, i = 0;

	if (abd->abd_flags & ABD_FLAG_MULTI_ZONE)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_zone);

	if (abd->abd_flags & ABD_FLAG_MULTI_CHUNK)
		ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		page = sg_page(sg);
		abd_unmark_zfs_page(page);
		order = compound_order(page);
		__free_pages(page, order);
		ASSERT3U(sg->length, <=, PAGE_SIZE << order);
		ABDSTAT_BUMPDOWN(abdstat_scatter_orders[order]);
	}
	abd_free_sg_table(abd);
}

/*
 * Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where each page in
 * the scatterlist will be set to the zero'd out buffer abd_zero_page.
 */
static void
abd_alloc_zero_scatter(void)
{
	struct scatterlist *sg = NULL;
	struct sg_table table;
	gfp_t gfp = __GFP_NOWARN | GFP_NOIO;
	int nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	int i = 0;

#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	gfp_t gfp_zero_page = gfp | __GFP_ZERO;
	while ((abd_zero_page = __page_cache_alloc(gfp_zero_page)) == NULL) {
		ABDSTAT_BUMP(abdstat_scatter_page_alloc_retry);
		schedule_timeout_interruptible(1);
	}
	abd_mark_zfs_page(abd_zero_page);
#else
	abd_zero_page = ZERO_PAGE(0);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */

	while (sg_alloc_table(&table, nr_pages, gfp)) {
		ABDSTAT_BUMP(abdstat_scatter_sg_table_retry);
		schedule_timeout_interruptible(1);
	}
	ASSERT3U(table.nents, ==, nr_pages);

	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = table.sgl;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#else /* _KERNEL */

#ifndef PAGE_SHIFT
#define	PAGE_SHIFT (highbit64(PAGESIZE)-1)
#endif

#define	zfs_kmap_atomic(chunk)		((void *)chunk)
#define	zfs_kunmap_atomic(addr)		do { (void)(addr); } while (0)
#define	local_irq_save(flags)		do { (void)(flags); } while (0)
#define	local_irq_restore(flags)	do { (void)(flags); } while (0)
#define	nth_page(pg, i) \
	((struct page *)((void *)(pg) + (i) * PAGESIZE))

struct scatterlist {
	struct page *page;
	int length;
	int end;
};

static void
sg_init_table(struct scatterlist *sg, int nr)
{
	memset(sg, 0, nr * sizeof (struct scatterlist));
	sg[nr - 1].end = 1;
}

/*
 * This must be called if any of the sg_table allocation functions
 * are called.
 */
static void
abd_free_sg_table(abd_t *abd)
{
	int nents = ABD_SCATTER(abd).abd_nents;
	vmem_free(ABD_SCATTER(abd).abd_sgl,
	    nents * sizeof (struct scatterlist));
}

#define	for_each_sg(sgl, sg, nr, i)	\
	for ((i) = 0, (sg) = (sgl); (i) < (nr); (i)++, (sg) = sg_next(sg))

static inline void
sg_set_page(struct scatterlist *sg, struct page *page, unsigned int len,
    unsigned int offset)
{
	/* currently we don't use offset */
	ASSERT(offset == 0);
	sg->page = page;
	sg->length = len;
}

static inline struct page *
sg_page(struct scatterlist *sg)
{
	return (sg->page);
}

static inline struct scatterlist *
sg_next(struct scatterlist *sg)
{
	if (sg->end)
		return (NULL);

	return (sg + 1);
}

void
abd_alloc_chunks(abd_t *abd, size_t size)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(size);
	struct scatterlist *sg;
	int i;

	ABD_SCATTER(abd).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);
	sg_init_table(ABD_SCATTER(abd).abd_sgl, nr_pages);

	abd_for_each_sg(abd, sg, nr_pages, i) {
		struct page *p = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
		sg_set_page(sg, p, PAGESIZE, 0);
	}
	ABD_SCATTER(abd).abd_nents = nr_pages;
}

void
abd_free_chunks(abd_t *abd)
{
	int i, n = ABD_SCATTER(abd).abd_nents;
	struct scatterlist *sg;

	abd_for_each_sg(abd, sg, n, i) {
		for (int j = 0; j < sg->length; j += PAGESIZE) {
			struct page *p = nth_page(sg_page(sg), j >> PAGE_SHIFT);
			umem_free(p, PAGESIZE);
		}
	}
	abd_free_sg_table(abd);
}

static void
abd_alloc_zero_scatter(void)
{
	unsigned nr_pages = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
	struct scatterlist *sg;
	int i;

	abd_zero_page = umem_alloc_aligned(PAGESIZE, 64, KM_SLEEP);
	memset(abd_zero_page, 0, PAGESIZE);
	abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
	abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER;
	abd_zero_scatter->abd_flags |= ABD_FLAG_MULTI_CHUNK | ABD_FLAG_ZEROS;
	ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
	ABD_SCATTER(abd_zero_scatter).abd_nents = nr_pages;
	abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
	ABD_SCATTER(abd_zero_scatter).abd_sgl = vmem_alloc(nr_pages *
	    sizeof (struct scatterlist), KM_SLEEP);

	sg_init_table(ABD_SCATTER(abd_zero_scatter).abd_sgl, nr_pages);

	abd_for_each_sg(abd_zero_scatter, sg, nr_pages, i) {
		sg_set_page(sg, abd_zero_page, PAGESIZE, 0);
	}

	ABDSTAT_BUMP(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, PAGESIZE);
	ABDSTAT_BUMP(abdstat_scatter_page_multi_chunk);
}

#endif /* _KERNEL */
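
/*
 * Decide whether an allocation of the given size should be linear rather
 * than scatter.  With scatter enabled and the default 1.5KB minimum, 512B
 * and 1KB requests return B_TRUE (linear) and larger requests return
 * B_FALSE (scatter).
 */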
boolean_t
abd_size_alloc_linear(size_t size)
{
	return (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size);
}

void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	int waste = P2ROUNDUP(abd->abd_size, PAGESIZE) - abd->abd_size;
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
		arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
		ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
		ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
		arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
	}
}

void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
	ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
	if (op == ABDSTAT_INCR) {
		ABDSTAT_BUMP(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
	} else {
		ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
		ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
	}
}

void
abd_verify_scatter(abd_t *abd)
{
	size_t n;
	int i = 0;
	struct scatterlist *sg = NULL;

	ASSERT3U(ABD_SCATTER(abd).abd_nents, >, 0);
	ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
	    ABD_SCATTER(abd).abd_sgl->length);
	n = ABD_SCATTER(abd).abd_nents;
	abd_for_each_sg(abd, sg, n, i) {
		ASSERT3P(sg_page(sg), !=, NULL);
	}
}

static void
abd_free_zero_scatter(void)
{
	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGESIZE);
	ABDSTAT_BUMPDOWN(abdstat_scatter_page_multi_chunk);

	abd_free_sg_table(abd_zero_scatter);
	abd_free_struct(abd_zero_scatter);
	abd_zero_scatter = NULL;
	ASSERT3P(abd_zero_page, !=, NULL);
#if defined(_KERNEL)
#if defined(HAVE_ZERO_PAGE_GPL_ONLY)
	abd_unmark_zfs_page(abd_zero_page);
	__free_page(abd_zero_page);
#endif /* HAVE_ZERO_PAGE_GPL_ONLY */
#else
	umem_free(abd_zero_page, PAGESIZE);
#endif /* _KERNEL */
}

static int
abd_kstats_update(kstat_t *ksp, int rw)
{
	abd_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);
	as->abdstat_struct_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_struct_size);
	as->abdstat_linear_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_cnt);
	as->abdstat_linear_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_linear_data_size);
	as->abdstat_scatter_cnt.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_cnt);
	as->abdstat_scatter_data_size.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_data_size);
	as->abdstat_scatter_chunk_waste.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++) {
		as->abdstat_scatter_orders[i].value.ui64 =
		    wmsum_value(&abd_sums.abdstat_scatter_orders[i]);
	}
	as->abdstat_scatter_page_multi_chunk.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_chunk);
	as->abdstat_scatter_page_multi_zone.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_multi_zone);
	as->abdstat_scatter_page_alloc_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_page_alloc_retry);
	as->abdstat_scatter_sg_table_retry.value.ui64 =
	    wmsum_value(&abd_sums.abdstat_scatter_sg_table_retry);
	return (0);
}

void
abd_init(void)
{
	int i;

	abd_cache = kmem_cache_create("abd_t", sizeof (abd_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	wmsum_init(&abd_sums.abdstat_struct_size, 0);
	wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
	wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
	wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
	wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
	for (i = 0; i < MAX_ORDER; i++)
		wmsum_init(&abd_sums.abdstat_scatter_orders[i], 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_chunk, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_multi_zone, 0);
	wmsum_init(&abd_sums.abdstat_scatter_page_alloc_retry, 0);
	wmsum_init(&abd_sums.abdstat_scatter_sg_table_retry, 0);

	abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (abd_ksp != NULL) {
		for (i = 0; i < MAX_ORDER; i++) {
			snprintf(abd_stats.abdstat_scatter_orders[i].name,
			    KSTAT_STRLEN, "scatter_order_%d", i);
			abd_stats.abdstat_scatter_orders[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		abd_ksp->ks_data = &abd_stats;
		abd_ksp->ks_update = abd_kstats_update;
		kstat_install(abd_ksp);
	}

	abd_alloc_zero_scatter();
}

void
abd_fini(void)
{
	abd_free_zero_scatter();

	if (abd_ksp != NULL) {
		kstat_delete(abd_ksp);
		abd_ksp = NULL;
	}

	wmsum_fini(&abd_sums.abdstat_struct_size);
	wmsum_fini(&abd_sums.abdstat_linear_cnt);
	wmsum_fini(&abd_sums.abdstat_linear_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_cnt);
	wmsum_fini(&abd_sums.abdstat_scatter_data_size);
	wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
	for (int i = 0; i < MAX_ORDER; i++)
		wmsum_fini(&abd_sums.abdstat_scatter_orders[i]);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_chunk);
	wmsum_fini(&abd_sums.abdstat_scatter_page_multi_zone);
	wmsum_fini(&abd_sums.abdstat_scatter_page_alloc_retry);
	wmsum_fini(&abd_sums.abdstat_scatter_sg_table_retry);

	if (abd_cache) {
		kmem_cache_destroy(abd_cache);
		abd_cache = NULL;
	}
}

void
abd_free_linear_page(abd_t *abd)
{
	/* Transform it back into a scatter ABD for freeing */
	struct scatterlist *sg = abd->abd_u.abd_linear.abd_sgl;
	abd->abd_flags &= ~ABD_FLAG_LINEAR;
	abd->abd_flags &= ~ABD_FLAG_LINEAR_PAGE;
	ABD_SCATTER(abd).abd_nents = 1;
	ABD_SCATTER(abd).abd_offset = 0;
	ABD_SCATTER(abd).abd_sgl = sg;
	abd_free_chunks(abd);

	abd_update_scatter_stats(abd, ABDSTAT_DECR);
}

/*
 * If we're going to use this ABD for doing I/O using the block layer, the
 * consumer of the ABD data doesn't care if it's scattered or not, and we don't
 * plan to store this ABD in memory for a long period of time, we should
 * allocate the ABD type that requires the least data copying to do the I/O.
 *
 * On Linux the optimal thing to do would be to use abd_get_offset() and
 * construct a new ABD which shares the original pages thereby eliminating
 * the copy.  But for the moment a new linear ABD is allocated until this
 * performance optimization can be implemented.
 */
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
	return (abd_alloc(size, is_metadata));
}

abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
    size_t size)
{
	(void) size;
	int i = 0;
	struct scatterlist *sg = NULL;

	abd_verify(sabd);
	ASSERT3U(off, <=, sabd->abd_size);

	size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;

	if (abd == NULL)
		abd = abd_alloc_struct(0);

	/*
	 * Even if this buf is filesystem metadata, we only track that
	 * if we own the underlying data buffer, which is not true in
	 * this case. Therefore, we don't ever use ABD_FLAG_META here.
	 */

	abd_for_each_sg(sabd, sg, ABD_SCATTER(sabd).abd_nents, i) {
		if (new_offset < sg->length)
			break;
		new_offset -= sg->length;
	}

	ABD_SCATTER(abd).abd_sgl = sg;
	ABD_SCATTER(abd).abd_offset = new_offset;
	ABD_SCATTER(abd).abd_nents = ABD_SCATTER(sabd).abd_nents - i;

	return (abd);
}
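
/*
 * The abd_iter functions below implement the chunk iteration protocol used
 * by the generic ABD code in abd.c: initialize with abd_iter_init(), then
 * repeatedly map a chunk with abd_iter_map(), operate on iter_mapaddr /
 * iter_mapsize, unmap with abd_iter_unmap(), and move forward with
 * abd_iter_advance() until abd_iter_at_end() reports the end of the ABD.
 */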

/*
 * Initialize the abd_iter.
 */
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
	ASSERT(!abd_is_gang(abd));
	abd_verify(abd);
	aiter->iter_abd = abd;
	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
	aiter->iter_pos = 0;
	if (abd_is_linear(abd)) {
		aiter->iter_offset = 0;
		aiter->iter_sg = NULL;
	} else {
		aiter->iter_offset = ABD_SCATTER(abd).abd_offset;
		aiter->iter_sg = ABD_SCATTER(abd).abd_sgl;
	}
}

/*
 * This is just a helper function to see if we have exhausted the
 * abd_iter and reached the end.
 */
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
	return (aiter->iter_pos == aiter->iter_abd->abd_size);
}

/*
 * Advance the iterator by a certain amount. Cannot be called when a chunk is
 * in use. This can be safely called when the aiter has already exhausted, in
 * which case this does nothing.
 */
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to advance to, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	aiter->iter_pos += amount;
	aiter->iter_offset += amount;
	if (!abd_is_linear(aiter->iter_abd)) {
		while (aiter->iter_offset >= aiter->iter_sg->length) {
			aiter->iter_offset -= aiter->iter_sg->length;
			aiter->iter_sg = sg_next(aiter->iter_sg);
			if (aiter->iter_sg == NULL) {
				ASSERT0(aiter->iter_offset);
				break;
			}
		}
	}
}

/*
 * Map the current chunk into aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
void
abd_iter_map(struct abd_iter *aiter)
{
	void *paddr;
	size_t offset = 0;

	ASSERT3P(aiter->iter_mapaddr, ==, NULL);
	ASSERT0(aiter->iter_mapsize);

	/* There's nothing left to iterate over, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (abd_is_linear(aiter->iter_abd)) {
		ASSERT3U(aiter->iter_pos, ==, aiter->iter_offset);
		offset = aiter->iter_offset;
		aiter->iter_mapsize = aiter->iter_abd->abd_size - offset;
		paddr = ABD_LINEAR_BUF(aiter->iter_abd);
	} else {
		offset = aiter->iter_offset;
		aiter->iter_mapsize = MIN(aiter->iter_sg->length - offset,
		    aiter->iter_abd->abd_size - aiter->iter_pos);

		paddr = zfs_kmap_atomic(sg_page(aiter->iter_sg));
	}

	aiter->iter_mapaddr = (char *)paddr + offset;
}

/*
 * Unmap the current chunk from aiter. This can be safely called when the aiter
 * has already exhausted, in which case this does nothing.
 */
void
abd_iter_unmap(struct abd_iter *aiter)
{
	/* There's nothing left to unmap, so do nothing */
	if (abd_iter_at_end(aiter))
		return;

	if (!abd_is_linear(aiter->iter_abd)) {
		/* LINTED E_FUNC_SET_NOT_USED */
		zfs_kunmap_atomic(aiter->iter_mapaddr - aiter->iter_offset);
	}

	ASSERT3P(aiter->iter_mapaddr, !=, NULL);
	ASSERT3U(aiter->iter_mapsize, >, 0);

	aiter->iter_mapaddr = NULL;
	aiter->iter_mapsize = 0;
}

void
abd_cache_reap_now(void)
{
}

#if defined(_KERNEL)
/*
 * bio_nr_pages for ABD.
 * @off is the offset in @abd
 */
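/*
 * For example, with 4K pages a 16K I/O starting 512 bytes into the first
 * page touches bytes 512 through 16895 and therefore spans 5 pages.
 */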
unsigned long
abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
{
	unsigned long pos;

	if (abd_is_gang(abd)) {
		unsigned long count = 0;

		for (abd_t *cabd = abd_gang_get_offset(abd, &off);
		    cabd != NULL && size != 0;
		    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
			ASSERT3U(off, <, cabd->abd_size);
			int mysize = MIN(size, cabd->abd_size - off);
			count += abd_nr_pages_off(cabd, mysize, off);
			size -= mysize;
			off = 0;
		}
		return (count);
	}

	if (abd_is_linear(abd))
		pos = (unsigned long)abd_to_buf(abd) + off;
	else
		pos = ABD_SCATTER(abd).abd_offset + off;

	return (((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
	    (pos >> PAGE_SHIFT));
}

static unsigned int
bio_map(struct bio *bio, void *buf_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(buf_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(buf_ptr))
			page = vmalloc_to_page(buf_ptr);
		else
			page = virt_to_page(buf_ptr);

		/*
		 * Some network related block devices use tcp_sendpage, which
		 * doesn't behave well when given a 0-count page; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		buf_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}

/*
 * bio_map for gang ABD.
 */
static unsigned int
abd_gang_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	ASSERT(abd_is_gang(abd));

	for (abd_t *cabd = abd_gang_get_offset(abd, &off);
	    cabd != NULL;
	    cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
		ASSERT3U(off, <, cabd->abd_size);
		int size = MIN(io_size, cabd->abd_size - off);
		int remainder = abd_bio_map_off(bio, cabd, size, off);
		io_size -= (size - remainder);
		if (io_size == 0 || remainder > 0)
			return (io_size);
		off = 0;
	}
	ASSERT0(io_size);
	return (io_size);
}

/*
 * bio_map for ABD.
 * @off is the offset in @abd
 * Remaining IO size is returned
 */
unsigned int
abd_bio_map_off(struct bio *bio, abd_t *abd,
    unsigned int io_size, size_t off)
{
	struct abd_iter aiter;

	ASSERT3U(io_size, <=, abd->abd_size - off);
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, io_size));

	ASSERT(!abd_is_linear(abd));
	if (abd_is_gang(abd))
		return (abd_gang_bio_map_off(bio, abd, io_size, off));

	abd_iter_init(&aiter, abd);
	abd_iter_advance(&aiter, off);

	for (int i = 0; i < bio->bi_max_vecs; i++) {
		struct page *pg;
		size_t len, sgoff, pgoff;
		struct scatterlist *sg;

		if (io_size <= 0)
			break;

		sg = aiter.iter_sg;
		sgoff = aiter.iter_offset;
		pgoff = sgoff & (PAGESIZE - 1);
		len = MIN(io_size, PAGESIZE - pgoff);
		ASSERT(len > 0);

		pg = nth_page(sg_page(sg), sgoff >> PAGE_SHIFT);
		if (bio_add_page(bio, pg, len, pgoff) != len)
			break;

		io_size -= len;
		abd_iter_advance(&aiter, len);
	}

	return (io_size);
}

/* Tunable Parameters */
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
	"Toggle whether ABD allocations must be linear.");
module_param(zfs_abd_scatter_min_size, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_min_size,
	"Minimum size of scatter allocations.");
/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
	"Maximum order allocation used for a scatter ABD.");
#endif