Fix dependency install on Debian 11 (#16683)
[zfs.git] / module / os / freebsd / spl / spl_uio.c
blob 74cbe36bbd9b4eac8c1d4e17046e1b81a51af332
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
32 * All Rights Reserved
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
36 * contributors.
40 * $FreeBSD$
43 #include <sys/param.h>
44 #include <sys/uio_impl.h>
45 #include <sys/vnode.h>
46 #include <sys/zfs_znode.h>
47 #include <sys/byteorder.h>
48 #include <sys/lock.h>
49 #include <sys/vm.h>
50 #include <vm/vm_map.h>
/*
 * Release a uio obtained from cloneuio().  FreeBSD 15 replaced the raw
 * free(uio, M_IOV) with a dedicated freeuio() accessor, so pick the
 * matching release call for the kernel version being built against.
 */
static void
zfs_freeuio(struct uio *uio)
{
#if __FreeBSD_version > 1500013
	freeuio(uio);
#else
	free(uio, M_IOV);
#endif
}
/*
 * Move n bytes between cp and the uio, in the direction recorded in the
 * uio itself.  The dir argument is only asserted against that recorded
 * direction; the actual copy is delegated to the native uiomove().
 */
int
zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{
	ASSERT3U(zfs_uio_rw(uio), ==, dir);
	return (uiomove(cp, (int)n, GET_UIO_STRUCT(uio)));
}
/*
 * same as zfs_uiomove() but doesn't modify uio structure.
 * return in cbytes how many bytes were copied.
 *
 * The copy is performed on a clone of the uio so the caller's offsets and
 * residual count are left untouched.  The common single-iovec case is
 * cloned on the stack; multi-iovec uios take a heap clone via cloneuio().
 */
int
zfs_uiocopy(void *p, size_t n, zfs_uio_rw_t rw, zfs_uio_t *uio, size_t *cbytes)
{
	struct iovec small_iovec[1];
	struct uio small_uio_clone;
	struct uio *uio_clone;
	int error;

	ASSERT3U(zfs_uio_rw(uio), ==, rw);
	if (zfs_uio_iovcnt(uio) == 1) {
		/*
		 * Shallow-copy the uio, then repoint uio_iov at our stack
		 * copy of the single iovec so uiomove can't touch the
		 * caller's iovec either.
		 */
		small_uio_clone = *(GET_UIO_STRUCT(uio));
		small_iovec[0] = *(GET_UIO_STRUCT(uio)->uio_iov);
		small_uio_clone.uio_iov = small_iovec;
		uio_clone = &small_uio_clone;
	} else {
		uio_clone = cloneuio(GET_UIO_STRUCT(uio));
	}

	error = vn_io_fault_uiomove(p, n, uio_clone);

	/* Bytes copied = original residual minus what the clone has left. */
	*cbytes = zfs_uio_resid(uio) - uio_clone->uio_resid;
	if (uio_clone != &small_uio_clone)
		zfs_freeuio(uio_clone);
	return (error);
}
/*
 * Drop the next n chars out of *uiop.
 *
 * Implemented by temporarily flipping the segment flag to UIO_NOCOPY so
 * zfs_uiomove() advances the uio's offset/residual bookkeeping without
 * copying any data, then restoring the original flag.
 */
void
zfs_uioskip(zfs_uio_t *uio, size_t n)
{
	zfs_uio_seg_t segflg;

	/* For the full compatibility with illumos. */
	if (n > zfs_uio_resid(uio))
		return;

	segflg = zfs_uio_segflg(uio);
	zfs_uio_segflg(uio) = UIO_NOCOPY;
	zfs_uiomove(NULL, n, zfs_uio_rw(uio), uio);
	zfs_uio_segflg(uio) = segflg;
}
/*
 * Fault-tolerant uiomove: like zfs_uiomove() but routed through
 * vn_io_fault_uiomove() so page faults on user memory are handled by the
 * vn_io_fault machinery instead of deadlocking against held vnode locks.
 */
int
zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
{
	ASSERT3U(zfs_uio_rw(uio), ==, dir);
	return (vn_io_fault_uiomove(p, n, GET_UIO_STRUCT(uio)));
}
124 * Check if the uio is page-aligned in memory.
126 boolean_t
127 zfs_uio_page_aligned(zfs_uio_t *uio)
129 const struct iovec *iov = GET_UIO_STRUCT(uio)->uio_iov;
131 for (int i = zfs_uio_iovcnt(uio); i > 0; iov++, i--) {
132 uintptr_t addr = (uintptr_t)iov->iov_base;
133 size_t size = iov->iov_len;
134 if ((addr & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
135 return (B_FALSE);
139 return (B_TRUE);
/*
 * Make the held user pages stable for the duration of a Direct I/O write:
 * shared-busy each page and revoke all write mappings so userland cannot
 * modify the data while it is being checksummed/compressed/written.
 * Undone by zfs_uio_release_stable_pages().
 */
static void
zfs_uio_set_pages_to_stable(zfs_uio_t *uio)
{
	ASSERT3P(uio->uio_dio.pages, !=, NULL);
	ASSERT3S(uio->uio_dio.npages, >, 0);

	for (int i = 0; i < uio->uio_dio.npages; i++) {
		vm_page_t page = uio->uio_dio.pages[i];
		ASSERT3P(page, !=, NULL);

		/* Sanity-check that this is a real, addressable vm_page. */
		MPASS(page == PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(page)));
		vm_page_busy_acquire(page, VM_ALLOC_SBUSY);

		/* Write-protect after busying so no new dirtying can race. */
		pmap_remove_write(page);
	}
}
158 static void
159 zfs_uio_release_stable_pages(zfs_uio_t *uio)
161 ASSERT3P(uio->uio_dio.pages, !=, NULL);
162 for (int i = 0; i < uio->uio_dio.npages; i++) {
163 vm_page_t page = uio->uio_dio.pages[i];
165 ASSERT3P(page, !=, NULL);
166 vm_page_sunbusy(page);
/*
 * If the operation is marked as read, then we are stating the pages will be
 * written to and must be given write access.
 *
 * Wires the user pages backing [start, start + len) into pages[] via
 * vm_fault_quick_hold_pages() and returns its count: the number of pages
 * held on success, or a negative value on failure.
 */
static int
zfs_uio_hold_pages(unsigned long start, size_t len, int nr_pages,
    zfs_uio_rw_t rw, vm_page_t *pages)
{
	vm_map_t map;
	vm_prot_t prot;
	int count;

	/* Always operate on the current process's address space. */
	map = &curthread->td_proc->p_vmspace->vm_map;
	ASSERT3S(len, >, 0);

	/* A ZFS read writes into user pages; a write only reads them. */
	prot = rw == UIO_READ ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
	count = vm_fault_quick_hold_pages(map, start, len, prot, pages,
	    nr_pages);

	return (count);
}
/*
 * Release all Direct I/O page state attached to the uio: un-stabilize the
 * pages first if this was a write, then unwire them and free the page
 * pointer array allocated by zfs_uio_get_dio_pages_alloc().
 */
void
zfs_uio_free_dio_pages(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
	ASSERT(uio->uio_extflg & UIO_DIRECT);
	ASSERT3P(uio->uio_dio.pages, !=, NULL);
	ASSERT(zfs_uio_rw(uio) == rw);

	/* Writes busied the pages for stability; undo that before unholding. */
	if (rw == UIO_WRITE)
		zfs_uio_release_stable_pages(uio);

	vm_page_unhold_pages(&uio->uio_dio.pages[0],
	    uio->uio_dio.npages);

	kmem_free(uio->uio_dio.pages,
	    uio->uio_dio.npages * sizeof (vm_page_t));
}
209 static int
210 zfs_uio_get_user_pages(unsigned long start, int nr_pages,
211 size_t len, zfs_uio_rw_t rw, vm_page_t *pages)
213 int count;
215 count = zfs_uio_hold_pages(start, len, nr_pages, rw, pages);
217 if (count != nr_pages) {
218 if (count > 0)
219 vm_page_unhold_pages(pages, count);
220 return (0);
223 ASSERT3S(count, ==, nr_pages);
225 return (count);
/*
 * Hold the user pages backing a single iovec.  On success *numpages is set
 * to the number of pages held and 0 is returned; on failure EFAULT is
 * returned and no pages remain held for this iovec.
 */
static int
zfs_uio_iov_step(struct iovec v, zfs_uio_t *uio, int *numpages)
{
	unsigned long addr = (unsigned long)(v.iov_base);
	size_t len = v.iov_len;
	int n = DIV_ROUND_UP(len, PAGE_SIZE);

	/* Append the new pages after those already gathered in uio_dio. */
	int res = zfs_uio_get_user_pages(
	    P2ALIGN_TYPED(addr, PAGE_SIZE, unsigned long), n, len,
	    zfs_uio_rw(uio), &uio->uio_dio.pages[uio->uio_dio.npages]);

	if (res != n)
		return (SET_ERROR(EFAULT));

	/* Direct I/O uios are page-aligned, so len is a page multiple. */
	ASSERT3U(len, ==, res * PAGE_SIZE);
	*numpages = res;
	return (0);
}
/*
 * Walk every iovec of the uio and hold its backing user pages,
 * accumulating them (and their count) into uio->uio_dio.  Returns 0 on
 * success or the error from the first failing iovec; on error the pages
 * already gathered remain held and must be released by the caller.
 */
static int
zfs_uio_get_dio_pages_impl(zfs_uio_t *uio)
{
	const struct iovec *iovp = GET_UIO_STRUCT(uio)->uio_iov;
	size_t len = zfs_uio_resid(uio);

	for (int i = 0; i < zfs_uio_iovcnt(uio); i++) {
		struct iovec iov;
		int numpages = 0;

		if (iovp->iov_len == 0) {
			iovp++;
			continue;
		}
		/* Clamp to the remaining residual for the final iovec. */
		iov.iov_len = MIN(len, iovp->iov_len);
		iov.iov_base = iovp->iov_base;
		int error = zfs_uio_iov_step(iov, uio, &numpages);

		if (error)
			return (error);

		uio->uio_dio.npages += numpages;
		len -= iov.iov_len;
		iovp++;
	}

	/* The iovecs must have covered the residual exactly. */
	ASSERT0(len);

	return (0);
}
/*
 * This function holds user pages into the kernel. In the event that the user
 * pages are not successfully held an error value is returned.
 *
 * On success, 0 is returned.  The page array is allocated here and, for
 * writes, the pages are made stable; both are undone by
 * zfs_uio_free_dio_pages().
 */
int
zfs_uio_get_dio_pages_alloc(zfs_uio_t *uio, zfs_uio_rw_t rw)
{
	int error = 0;
	int npages = DIV_ROUND_UP(zfs_uio_resid(uio), PAGE_SIZE);
	size_t size = npages * sizeof (vm_page_t);

	ASSERT(zfs_uio_rw(uio) == rw);

	uio->uio_dio.pages = kmem_alloc(size, KM_SLEEP);

	error = zfs_uio_get_dio_pages_impl(uio);

	if (error) {
		/* Release any pages held before the failing iovec. */
		vm_page_unhold_pages(&uio->uio_dio.pages[0],
		    uio->uio_dio.npages);
		kmem_free(uio->uio_dio.pages, size);
		return (error);
	}

	ASSERT3S(uio->uio_dio.npages, >, 0);

	/*
	 * Since we will be writing the user pages we must make sure that
	 * they are stable. That way the contents of the pages can not change
	 * while we are doing: compression, checksumming, encryption, parity
	 * calculations or deduplication.
	 */
	if (zfs_uio_rw(uio) == UIO_WRITE)
		zfs_uio_set_pages_to_stable(uio);

	uio->uio_extflg |= UIO_DIRECT;

	return (0);
}