/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 */
/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map = 0;
vm_map_t buffer_map = 0;

/*
 * Allocate a virtual address range with no underlying object and
 * no initial mapping to physical memory.  Any mapping from this
 * range to physical memory must be explicitly created prior to
 * its use, typically with pmap_qenter().  Any attempt to create
 * a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
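
/*
 * Usage sketch, not part of the original source: the page pointer "m" and
 * where it comes from are hypothetical.  A caller reserves KVA here and
 * then creates the mapping itself, typically with pmap_qenter(), because
 * no object backs the range and any fault on it panics.
 *
 *	vm_offset_t va;
 *	vm_page_t m;		(obtained elsewhere, e.g. vm_page_alloc())
 *
 *	va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
 *	if (va != 0)
 *		pmap_qenter(va, &m, 1);
 */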

/*
 * Allocate wired-down memory in the kernel's address map.
 */
vm_offset_t
kmem_alloc(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        vm_offset_t offset;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages.  Assume that no
         * region of the kernel object is referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_wire.  This is to prevent the following scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         * kernel_object.  2) The kmsg zone is empty, and so we are
         * kmem_allocing a new page for it.  3) vm_map_wire calls vm_fault;
         * there is no page, but there is a pager, so we call
         * pager_data_request.  But the kmsg zone is empty, so we must
         * kmem_alloc.  4) goto 1  5) Even if the kmsg zone is not empty: when
         * we get the data back from the pager, it will be (very stale)
         * non-zero data.  kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to prevent
         * a race with page-out.  vm_map_wire will wire the pages.
         */
        VM_OBJECT_LOCK(kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
                    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                mem->valid = VM_PAGE_BITS_ALL;
                KASSERT((mem->flags & PG_UNMANAGED) != 0,
                    ("kmem_alloc: page %p is managed", mem));
        }
        VM_OBJECT_UNLOCK(kernel_object);

        /*
         * And finally, mark the data as non-pageable.
         */
        (void) vm_map_wire(map, addr, addr + size,
            VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

        return (addr);
}
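
/*
 * Usage sketch (illustrative only): memory obtained from kmem_alloc()
 * comes back wired and zero-filled, and is returned with kmem_free()
 * using the same map and size.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc(kernel_map, 4 * PAGE_SIZE);
 *	if (va != 0) {
 *		(use the wired, zero-filled range)
 *		kmem_free(kernel_map, va, 4 * PAGE_SIZE);
 *	}
 */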

/*
 * Release a region of kernel virtual memory allocated
 * with kmem_alloc, and return the physical pages
 * associated with that region.
 *
 * This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Allocates a map to manage a subrange
 * of the kernel virtual address space.
 *
 * Arguments are as follows:
 *
 * parent           Map to take range from
 * min, max         Returned endpoints of map
 * size             Size of range to find
 * superpage_align  Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
        int ret;
        vm_map_t result;

        size = round_page(size);

        *min = vm_map_min(parent);
        ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
            VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS)
                panic("kmem_suballoc: bad status return of %d", ret);
        *max = *min + size;
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
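
/*
 * Usage sketch (illustrative; the 16 MB size is an arbitrary example):
 * carving a submap out of the kernel map.  min and max receive the
 * endpoints that vm_map_find() chose for the range.
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t submap;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * 1024 * 1024, FALSE);
 */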

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 *
 * `map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 * which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
        vm_map_t map;
        vm_size_t size;
        int flags;
{
        vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_offset_t addr;
        vm_page_t m;
        int pflags;

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                if ((flags & M_NOWAIT) == 0) {
                        for (i = 0; i < 8; i++) {
                                EVENTHANDLER_INVOKE(vm_lowmem, 0);
                                if (vm_map_findspace(map, vm_map_min(map),
                                    size, &addr) == 0)
                                        break;
                                tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
                        }
                        if (i == 8)
                                panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
                                    (long)size, (long)map->size);
                } else {
                        return (0);
                }
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
                pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
        else
                pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

        if (flags & M_ZERO)
                pflags |= VM_ALLOC_ZERO;

        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
retry:
                m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

                /*
                 * Ran out of space, free everything up and return.  Don't need
                 * to lock page queues here as we know that the pages we got
                 * aren't on any queues.
                 */
                if (m == NULL) {
                        if ((flags & M_NOWAIT) == 0) {
                                VM_OBJECT_UNLOCK(kmem_object);
                                VM_WAIT;
                                VM_OBJECT_LOCK(kmem_object);
                                goto retry;
                        }
                        /*
                         * Free the pages before removing the map entry.
                         * They are already marked busy.  Calling
                         * vm_map_delete before the pages has been freed or
                         * unbusied will cause a deadlock.
                         */
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object,
                                    OFF_TO_IDX(offset + i));
                                vm_page_lock_queues();
                                vm_page_unwire(m, 0);
                                vm_page_free(m);
                                vm_page_unlock_queues();
                        }
                        VM_OBJECT_UNLOCK(kmem_object);
                        vm_map_delete(map, addr, addr + size);
                        return (0);
                }
                if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                KASSERT((m->flags & PG_UNMANAGED) != 0,
                    ("kmem_malloc: page %p is managed", m));
        }
        VM_OBJECT_UNLOCK(kmem_object);

        /*
         * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
         * be able to extend the previous entry so there will be a new entry
         * exactly corresponding to this address range and it will have
         * wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count != 0)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count = 1;

        /*
         * At this point, the kmem_object must be unlocked because
         * vm_map_simplify_entry() calls vm_object_deallocate(), which
         * locks the kmem_object.
         */
        vm_map_simplify_entry(map, entry);

        /*
         * Loop thru pages, entering them in the pmap.
         */
        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
                /*
                 * Because this is kernel_pmap, this call will not block.
                 */
                pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
                    TRUE);
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(kmem_object);

        return (addr);
}
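
/*
 * Usage sketch (illustrative; the calling context is assumed): the
 * malloc(9) layer passes its own submap, a page-rounded size, and its
 * flags.  With M_NOWAIT the routine returns 0 instead of sleeping for
 * KVA or pages, so the caller must be prepared to fail the allocation.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, PAGE_SIZE, M_NOWAIT | M_ZERO);
 *	if (va == 0)
 *		return (NULL);
 */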

/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;

        size = round_page(size);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's lock
                 * to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, 0);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        if (map->needs_wakeup) {
                map->needs_wakeup = FALSE;
                vm_map_wakeup(map);
        }
        vm_map_unlock(map);
}
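
/*
 * Usage sketch (illustrative; exec_map and len are assumptions): this
 * routine and kmem_alloc_wait() above pair up on a pageable submap; the
 * freeing side posts the wakeup that the allocating side sleeps on.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, round_page(len));
 *	(copy the data into the range)
 *	kmem_free_wakeup(exec_map, va, round_page(len));
 */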

/*
 * Create the kernel map; insert a mapping covering kernel text,
 * data, bss, and all space allocated thus far (`bootstrap' data).  The
 * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 * `start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
        vm_offset_t start, end;
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        m->system_map = 1;
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
            VM_MIN_KERNEL_ADDRESS,
            start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}
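
/*
 * Boot-time sketch (an assumption about the caller, not taken from this
 * file): the VM startup code calls this once, roughly as
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * after which everything below `start' is treated as allocated and the
 * range from `start' to `end' is free for the allocators above.
 */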