/*	$NetBSD: uvm_map.h,v 1.63 2009/06/10 01:55:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_
/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address, if it doesn't we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }
/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address, if it doesn't we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
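/*
 * Illustrative sketch (not part of this interface): a caller changing
 * attributes of an address range typically clips the containing entry
 * so that it covers exactly [start, end) before modifying it.  The
 * lookup and the attribute change below are hypothetical; the clip
 * macros, the locking rule, and the "umr" reservation (see
 * struct uvm_mapent_reservation below) are from this header.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start, umr);
 *		UVM_MAP_CLIP_END(map, entry, end, umr);
 *		entry->protection = VM_PROT_READ;	(hypothetical)
 *	}
 *	vm_map_unlock(map);
 */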
/*
 * uvm_map_extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
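/*
 * Illustrative sketch: uvm_map_extract() (prototyped below) moves a
 * range of virtual space from one map to another, and the flags above
 * may be or'ed together to control it.  Here a range is removed from
 * the source map and kept virtually contiguous in the destination;
 * "srcmap", "dstmap", "start" and "len" are placeholders.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */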
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>
/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01	/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02	/* contains map entries */
#define	UVM_MAP_FIRST		0x04	/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08	/* allocated with UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10	/* this entry is not mergeable */
};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
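/*
 * Illustrative sketch: how an entry is interpreted.  Whether "object"
 * holds a uvm_object or a submap is encoded in bits of "etype"; the
 * UVM_ET_* definitions and the UVM_ET_ISSUBMAP() test used below are
 * assumed to come from uvm_extern.h, not this header.
 *
 *	if (UVM_ET_ISSUBMAP(entry)) {
 *		struct vm_map *sub = entry->object.sub_map;
 *		... recurse into the submap ...
 *	} else if (VM_MAPENT_ISWIRED(entry)) {
 *		... pages in [entry->start, entry->end) are wired ...
 *	}
 */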
/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the error
 * recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 * (An illustrative sketch of this pattern follows the locking
 * operation prototypes below.)
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check.  All flags which are r/w must be set or cleared while
 * the `flags_lock' is asserted.  Additional locking requirements
 * are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		mutex;		/* INTRSAFE lock */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
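/*
 * Illustrative sketch: the entries hang off the "header" sentinel as
 * a circular doubly-linked list sorted by address, so a full scan of
 * a map looks like the following (the map must be at least read-locked
 * by the caller, per the locking notes above):
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		... examine [entry->start, entry->end) ...
 *	}
 *	vm_map_unlock_read(map);
 */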
#if defined(_KERNEL)

#include <sys/callback.h>
struct vm_map_kernel {
	struct vm_map vmk_map;
	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
			/* Freelist of map entry */
	struct vm_map_entry	*vmk_merged_entries;
			/* Merged entries, kept for later splitting */

	struct callback_head vmk_reclaim_callback;
#if !defined(PMAP_MAP_POOLPAGE)
	struct pool vmk_vacache;	/* kva cache */
	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */
#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20	/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40	/* ro: arrange map top-down */
#define	VM_MAP_VACACHE		0x80	/* ro: use kva cache */
#define	VM_MAP_WANTVA		0x100	/* rw: want va */
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};

#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
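/*
 * Illustrative sketch: clipping can require new entries, so callers
 * pre-reserve them with uvm_mapent_reserve() (prototyped below) and
 * hand the reservation down to the clip macros.  Error handling is
 * simplified and "entry" and "va" are placeholders.
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	if (uvm_mapent_reserve(map, &umr, 2, 0) == 0) {
 *		vm_map_lock(map);
 *		UVM_MAP_CLIP_START(map, entry, va, &umr);
 *		vm_map_unlock(map);
 *		uvm_mapent_unreserve(map, &umr);
 *	}
 */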
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif
/*
 * protos: the following prototypes define the interface to vm_map
 */
void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_setup_kernel(struct vm_map_kernel *,
		    vaddr_t, vaddr_t, int);
struct vm_map_kernel *
		vm_map_to_kernel(struct vm_map *);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *,
		    int);
int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
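/*
 * Illustrative sketch: uvm_map_prepare() and uvm_map_enter() split a
 * mapping request into an argument-checking phase and an insertion
 * phase, so a caller can allocate the new entry in between.  The
 * "new_entry" allocation is a placeholder and error handling is
 * simplified; 0 is passed for the alignment argument.
 *
 *	struct uvm_map_args args;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset, 0,
 *	    flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */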
int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);

vsize_t		uvm_mapent_overhead(vsize_t, int);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);
/*
 *	VM map locking operations.
 */
bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);
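/*
 * Illustrative sketch: the busy-map pattern from the locking notes
 * above, as it plausibly appears to a caller.  The thread write-locks
 * the map, marks it busy (so would-be writers wait), drops the lock
 * for the long operation, and is the only thread permitted to relock
 * and clear the busy state afterwards.  The exact internal lock
 * downgrade/upgrade steps are handled inside these functions and are
 * not shown here.
 *
 *	vm_map_lock(map);	(exclusive lock)
 *	vm_map_busy(map);	(mark busy; writers now wait)
 *	vm_map_unlock(map);
 *	... long operation; readers may still use the map ...
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);	(clear busy, wake waiting writers)
 *	vm_map_unlock(map);
 */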
#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
/*
 * Note the apparent inversion: the "header" sentinel entry stores the
 * map's minimum address in header.end and its maximum in header.start,
 * so that the sentinel bounds the entry list correctly at both ends.
 */
#define	vm_map_min(map)		((map)->header.end)
#define	vm_map_max(map)		((map)->header.start)
#define	vm_map_setmin(map, v)	((map)->header.end = (v))
#define	vm_map_setmax(map, v)	((map)->header.start = (v))

#define	vm_map_pmap(map)	((map)->pmap)
#endif /* _UVM_UVM_MAP_H_ */