[netbsd-mini2440.git] / sys / uvm / uvm_io.c
/* $NetBSD: uvm_io.c,v 1.23 2005/12/20 08:25:58 skrll Exp $ */

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_io.c,v 1.1.2.2 1997/12/30 12:02:00 mrg Exp
 */

/*
 * uvm_io.c: uvm i/o ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_io.c,v 1.23 2005/12/20 08:25:58 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

/*
 * functions
 */

/*
 * uvm_io: perform I/O on a map
 *
 * => caller must have a reference to "map" so that it doesn't go away
 *    while we are working.
 */

int
uvm_io(struct vm_map *map, struct uio *uio)
{
        vaddr_t baseva, endva, pageoffset, kva;
        vsize_t chunksz, togo, sz;
        struct vm_map_entry *dead_entries;
        int error;

        /*
         * step 0: sanity checks and set up for copy loop.  start with a
         * large chunk size.  if we have trouble finding vm space we will
         * reduce it.
         */

        if (uio->uio_resid == 0)
                return(0);
        togo = uio->uio_resid;

        baseva = (vaddr_t) uio->uio_offset;
        endva = baseva + (togo - 1);

        if (endva < baseva)   /* wrap around? */
                return(EIO);

        if (baseva >= VM_MAXUSER_ADDRESS)
                return(0);
        if (endva >= VM_MAXUSER_ADDRESS)
                /* EOF truncate */
                togo = togo - (endva - VM_MAXUSER_ADDRESS + 1);
        pageoffset = baseva & PAGE_MASK;
        baseva = trunc_page(baseva);
        chunksz = MIN(round_page(togo + pageoffset), trunc_page(MAXPHYS));
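        /*
         * Example (values are typical, not from this file): with 4 kB pages
         * and a 64 kB MAXPHYS, a 100 kB transfer starting 0x234 bytes into
         * a page gives round_page(102400 + 0x234) = 104 kB, so chunksz is
         * capped at trunc_page(MAXPHYS) = 64 kB and the loop below moves
         * the data in pieces of at most 64 kB.
         */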
        error = 0;

        /*
         * step 1: main loop...  while we've got data to move
         */

        for (/*null*/; togo > 0 ; pageoffset = 0) {

                /*
                 * step 2: extract mappings from the map into kernel_map
                 */
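                /*
                 * Flag summary (added for clarity): UVM_EXTRACT_QREF takes
                 * quick references on the backing amaps/objects instead of
                 * copying them, UVM_EXTRACT_CONTIG asks for one contiguous
                 * range of kernel virtual address, and UVM_EXTRACT_FIXPROT
                 * maps the chunk at the entries' maximum protection so the
                 * uiomove() below works even where the current user
                 * protection is read-only.
                 */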
                error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
                    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
                    UVM_EXTRACT_FIXPROT);
                if (error) {

                        /* retry with a smaller chunk... */
                        if (error == ENOMEM && chunksz > PAGE_SIZE) {
                                chunksz = trunc_page(chunksz / 2);
                                if (chunksz < PAGE_SIZE)
                                        chunksz = PAGE_SIZE;
                                continue;
                        }

                        break;
                }

                /*
                 * step 3: move a chunk of data
                 */

                sz = chunksz - pageoffset;
                if (sz > togo)
                        sz = togo;
                error = uiomove((void *) (kva + pageoffset), sz, uio);
                togo -= sz;
                baseva += chunksz;
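                /*
                 * pageoffset is non-zero only on the first pass (the for
                 * loop resets it to 0 afterwards), so later chunks start
                 * page-aligned and baseva can advance by a full chunksz
                 * even though only sz bytes were moved this time around.
                 */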
                /*
                 * step 4: unmap the area of kernel memory
                 */

                vm_map_lock(kernel_map);
                uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries,
                    NULL, 0);
                vm_map_unlock(kernel_map);
                if (dead_entries != NULL)
                        uvm_unmap_detach(dead_entries, AMAP_REFALL);

                if (error)
                        break;
        }

        return (error);
}
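The sketch below is a hypothetical caller, not part of uvm_io.c: it shows roughly how ptrace/procfs-style code could read a range of another process's address space through uvm_io(). The helper name example_readmem, the kernel buffer, and the use of uvmspace_addref()/UIO_SETUP_SYSSPACE() are illustrative assumptions (those helpers exist in later NetBSD kernels; an older tree would take and drop the map reference by hand). The only hard requirement, per the comment above uvm_io(), is that the caller holds a reference on the target map for the duration of the call; the headers already included at the top of this file should be sufficient.

/*
 * Hypothetical example -- not part of uvm_io.c.  Read "len" bytes at user
 * virtual address "uva" in process "p" into the kernel buffer "kbuf".
 */
static int
example_readmem(struct proc *p, vaddr_t uva, void *kbuf, size_t len)
{
        struct vmspace *vm = p->p_vmspace;
        struct iovec iov;
        struct uio uio;
        int error;

        iov.iov_base = kbuf;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t)uva;    /* uvm_io() treats the offset as a VA in "map" */
        uio.uio_resid = len;
        uio.uio_rw = UIO_READ;          /* move data from the target map into kbuf */
        UIO_SETUP_SYSSPACE(&uio);       /* the iovec points at kernel memory */

        uvmspace_addref(vm);            /* keep the target map from going away */
        error = uvm_io(&vm->vm_map, &uio);
        uvmspace_free(vm);

        return error;
}

Writing in the other direction is the same setup with uio_rw = UIO_WRITE; uvm_io() itself only cares that uio_offset names the target address, that the iovecs describe the local buffer, and that uio_resid says how much is left to move.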