/*	$NetBSD: uvm_km.c,v 1.103 2008/12/13 11:34:43 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * uvm_km.c: handle kernel memory allocation and management
 */
/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel malloc
 *   mb_map => memory for large mbufs
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * for example, suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel
 * does a uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page
 * in the kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 * then that means that the page at offset 0x235000 in kernel_object is
 * mapped at 0xf8235000.  (a small sketch of this arithmetic follows this
 * comment block.)
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.103 2008/12/13 11:34:43 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;
#if !defined(PMAP_MAP_POOLPAGE)

/*
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))
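/*
 * note: KM_VACACHE_POOL_TO_MAP recovers the map that owns a vacache pool.
 * the pool handed to km_vacache_alloc/km_vacache_free is the vmk_vacache
 * member embedded in a struct vm_map_kernel, so subtracting its offsetof()
 * yields the enclosing structure, which begins with its struct vm_map
 * (the usual "container of" pointer arithmetic).
 */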
static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}
static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}
/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}
void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}
#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */
void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);

	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
}
/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    [end -> VM_MAX_KERNEL_ADDRESS] is still free.
 */
void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}
		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}
/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	malloc a new one
 */
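/*
 * usage sketch (hedged; the names below are illustrative and not defined
 * in this file): a subsystem typically carves its submap out of kernel_map
 * once at boot time, along the lines of
 *
 *	vaddr_t base, limit;
 *
 *	my_map = uvm_km_suballoc(kernel_map, &base, &limit, my_size,
 *	    VM_MAP_INTRSAFE, false, &my_map_store);
 *
 * where my_size is the submap size and my_map_store is a statically
 * allocated struct vm_map_kernel; the flags argument is a vm_map flag
 * such as VM_MAP_INTRSAFE or VM_MAP_PAGEABLE.
 */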
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug in it...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */
void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			/* page is busy: wait for it, then retry this offset */
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			mutex_enter(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		/* adjust the global count of swap-only pages */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}
/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
 * kernel memory.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */
void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(vm_map_min(map) <= start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}
#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
			mutex_enter(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(&uvm_kernel_object->vmobjlock);
			if (pg != NULL) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */
/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */
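/*
 * usage sketch (hedged, illustrative only): wired, zero-filled kernel
 * memory is usually obtained and later released as a pair, e.g.
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_km_alloc(kernel_map, size, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (kva == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
 *
 * note that the UVM_KMF_* type flag passed to uvm_km_free() must match
 * the one used at allocation time.
 */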
vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vsize_t loopsize;
	voff_t offset;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * whom should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = 0;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot | PMAP_KMPAGE, 0);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
		pmap_kremove(addr, size);
	}

	/*
	 * uvm_unmap_remove calls pmap_update for us.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}
/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */
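/*
 * background note (an assumption about typical pmaps, not stated in this
 * file): PMAP_MAP_POOLPAGE usually reaches a page through a pmap-provided
 * mapping such as a direct-mapped segment, so no kernel_map virtual
 * address needs to be consumed; the uvm_km_alloc() path below is the
 * fallback when the pmap provides no such method.
 */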
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			pool_put(pp, (void *)va);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}
vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

again:
	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;

	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	pool_put(pp, (void *)addr);
#endif /* PMAP_UNMAP_POOLPAGE */
}
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
#endif /* PMAP_UNMAP_POOLPAGE */
}