lib/scatterlist.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
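
/*
 * Illustrative sketch, not part of the original file: walking a (possibly
 * chained) scatterlist with sg_next(). The function name is hypothetical;
 * any initialized scatterlist works the same way.
 */
static unsigned int __maybe_unused example_total_length(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	/*
	 * sg_next() transparently follows chain entries and returns NULL
	 * after the entry marked with sg_mark_end().
	 */
	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}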
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
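
/*
 * Illustrative sketch, not part of the original file: sizing a partial
 * mapping with sg_nents_for_len(). "req_len" is a hypothetical request
 * length chosen by the caller.
 */
static int __maybe_unused example_map_prefix(struct scatterlist *sgl, u64 req_len)
{
	int nents = sg_nents_for_len(sgl, req_len);

	if (nents < 0)		/* list shorter than req_len: -EINVAL */
		return nents;

	/*
	 * Only the first 'nents' entries need to be handed to a consumer
	 * (e.g. the DMA mapping layer) to cover req_len bytes.
	 */
	return nents;
}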
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);
/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
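
/*
 * Illustrative sketch, not part of the original file: the common pattern
 * of wrapping a single kmalloc'ed buffer in a one-entry scatterlist, e.g.
 * for a crypto or block layer API that consumes scatterlists. The function
 * name and the 512-byte size are hypothetical.
 */
static int __maybe_unused example_single_entry(void)
{
	struct scatterlist sg;
	void *buf = kmalloc(512, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* One entry, already marked as the end of the list. */
	sg_init_one(&sg, buf, 512);

	/* ... hand &sg (nents == 1) to the consuming API ... */

	kfree(buf);
	return 0;
}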
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
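
/*
 * Illustrative sketch, not part of the original file: allocating a table
 * for a hypothetical array of pages, filling it one page per entry, and
 * releasing it again with sg_free_table().
 */
static int __maybe_unused example_table(struct page **pages, unsigned int n)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n, GFP_KERNEL);
	if (ret)
		return ret;

	/* One full page per entry in this sketch. */
	for_each_sg(table.sgl, sg, table.orig_nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... map with dma_map_sg() and use the table ... */

	sg_free_table(&table);
	return 0;
}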
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at a
 *    start and a size of valid data in a buffer specified by the page array.
 *    The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
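
/*
 * Illustrative sketch, not part of the original file: building a table
 * directly from a pinned page array, letting physically contiguous pages
 * collapse into single entries. The zero offset and whole-array size are
 * hypothetical choices.
 */
static int __maybe_unused example_from_pages(struct page **pages,
					     unsigned int n_pages)
{
	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* ... use sgt.sgl / sgt.orig_nents ... */

	sg_free_table(&sgt);
	return 0;
}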
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
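
/*
 * Illustrative sketch, not part of the original file: sgl_alloc() pairs
 * with sgl_free(), which releases both the pages and the scatterlist
 * array. The 64 KiB length is an arbitrary example value.
 */
static int __maybe_unused example_sgl(void)
{
	unsigned int nents;
	struct scatterlist *sgl = sgl_alloc(64 * 1024, GFP_KERNEL, &nents);

	if (!sgl)
		return -ENOMEM;

	/* ... fill the pages, e.g. via sg_copy_from_buffer() ... */

	sgl_free(sgl);	/* frees the pages and the scatterlist itself */
	return 0;
}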
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);
/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */
void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
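
/*
 * Illustrative sketch, not part of the original file: the page iterator
 * is normally driven through the for_each_sg_page() helper from
 * <linux/scatterlist.h> rather than by calling __sg_page_iter_next()
 * directly.
 */
static void __maybe_unused example_page_iter(struct scatterlist *sgl,
					     unsigned int nents)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		/* ... operate on each individual page ... */
		(void)page;
	}
}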
static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* iteration flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop.
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
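
/*
 * Illustrative sketch, not part of the original file: the canonical
 * mapping iterator loop. SG_MITER_FROM_SG maps each page for reading
 * from the scatterlist; the byte checksum is just an arbitrary consumer.
 */
static u32 __maybe_unused example_miter(struct scatterlist *sgl,
					unsigned int nents)
{
	struct sg_mapping_iter miter;
	u32 sum = 0;
	size_t i;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		const u8 *p = miter.addr;

		/* miter.addr/miter.length describe one mapped chunk. */
		for (i = 0; i < miter.length; i++)
			sum += p[i];
	}
	sg_miter_stop(&miter);

	return sum;
}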
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
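
/*
 * Illustrative sketch, not part of the original file: round-tripping a
 * linear buffer through a scatterlist with the copy helpers. The 128-byte
 * size and 0xaa fill pattern are arbitrary example values.
 */
static int __maybe_unused example_copy(struct scatterlist *sgl,
				       unsigned int nents)
{
	u8 tmp[128];
	size_t n;

	memset(tmp, 0xaa, sizeof(tmp));

	/* Linear buffer -> scatterlist pages ... */
	n = sg_copy_from_buffer(sgl, nents, tmp, sizeof(tmp));
	if (n != sizeof(tmp))
		return -EINVAL;

	/* ... and back again. */
	n = sg_copy_to_buffer(sgl, nents, tmp, sizeof(tmp));
	return n == sizeof(tmp) ? 0 : -EINVAL;
}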
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
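
/*
 * Illustrative sketch, not part of the original file: scrubbing a region
 * of a scatterlist, e.g. keeping a hypothetical 16-byte header intact and
 * zeroing the 'payload' bytes that follow it.
 */
static size_t __maybe_unused example_zero(struct scatterlist *sgl,
					  unsigned int nents, size_t payload)
{
	/* Skip the first 16 bytes, then zero 'payload' bytes. */
	return sg_zero_buffer(sgl, nents, payload, 16);
}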