/*	$NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $	*/

/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * A simple memory allocator for debugging.  It tries to catch:
 *
 * - Overflow, in realtime
 * - Underflow, at free
 * - Invalid pointer/size passed, at free
 * - Use-after-free
 */
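
/*
 * The scheme: each object gets a private two-page VA range.  Only the
 * first page is mapped, and the object is placed at the end of it, hard
 * against the unmapped second page, so an overflowing access faults at
 * once.  A canary word stored just before the object is verified at
 * free time, and freed ranges linger unmapped in a fifo before their
 * VA is recycled, which traps use-after-free.
 */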

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_kmguard.h>
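
/*
 * The canary encodes both the page address and the allocation size, so
 * a free with a mismatched pointer or length fails the canary check as
 * surely as an underflow does.  MAXSIZE leaves room in the mapped page
 * for that one canary word.
 */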
#define	CANARY(va, size)	((void *)((va) ^ 0x9deeba9 ^ (size)))
#define	MAXSIZE			(PAGE_SIZE - sizeof(void *))

void
uvm_kmguard_init(struct uvm_kmguard *kg, u_int *depth, size_t *size,
    struct vm_map *map)
{
	vaddr_t va;

	/*
	 * if not enabled, we have nothing to do.
	 */

	if (*depth == 0) {
		return;
	}
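
	/*
	 * round the depth up to a whole number of pointers per page, so
	 * that the fifo allocated below occupies whole pages.
	 */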
	*depth = roundup((*depth), PAGE_SIZE / sizeof(void *));
	KASSERT(*depth != 0);

	/*
	 * allocate fifo.
	 */

	va = uvm_km_alloc(kernel_map, *depth * sizeof(void *), PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0) {
		*depth = 0;
		*size = 0;
	} else {
		*size = MAXSIZE;
	}

	/*
	 * init object.
	 */

	kg->kg_map = map;
	kg->kg_fifo = (void *)va;
	kg->kg_depth = *depth;
	kg->kg_rotor = 0;

	printf("uvm_kmguard(%p): depth %d\n", kg, *depth);
}
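
/*
 * Typical wiring (a sketch; the names below are illustrative, not part
 * of this file): a kernel allocator keeps a static guard object and a
 * tunable depth, initializes the guard once at boot, and diverts small
 * allocations through it while debugging:
 *
 *	static struct uvm_kmguard guard;
 *	static u_int guard_depth = 1024;
 *	static size_t guard_size;
 *
 *	uvm_kmguard_init(&guard, &guard_depth, &guard_size, kmem_map);
 *
 *	alloc:	p = uvm_kmguard_alloc(&guard, size, waitok);
 *		if p == NULL, fall back to the normal allocator
 *
 *	free:	if (!uvm_kmguard_free(&guard, size, p))
 *			fall back to the normal free path
 */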

void *
uvm_kmguard_alloc(struct uvm_kmguard *kg, size_t len, bool waitok)
{
	struct vm_page *pg;
	void **p;
	vaddr_t va;
	int flag;

	/*
	 * can't handle >PAGE_SIZE allocations.  let the caller handle
	 * them normally.
	 */

	if (len > MAXSIZE) {
		return NULL;
	}

	/*
	 * allocate two pages of kernel VA, but do not map anything in yet.
	 */

	if (waitok) {
		flag = UVM_KMF_WAITVA;
	} else {
		flag = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	}
	va = vm_map_min(kg->kg_map);
	if (__predict_false(uvm_map(kg->kg_map, &va, PAGE_SIZE*2, NULL,
	    UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_ALL,
	    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flag
	    | UVM_FLAG_QUANTUM)) != 0)) {
		return NULL;
	}

	/*
	 * allocate a single page and map it in at the start of the
	 * two-page block.
	 */

	for (;;) {
		pg = uvm_pagealloc(NULL, va - vm_map_min(kg->kg_map), NULL, 0);
		if (__predict_true(pg != NULL)) {
			break;
		}
		if (waitok) {
			uvm_wait("kmguard");	/* sleep until pages free up */
			continue;
		} else {
			uvm_km_free(kg->kg_map, va, PAGE_SIZE*2,
			    UVM_KMF_VAONLY);
			return NULL;
		}
	}

	pg->flags &= ~PG_BUSY;	/* new page */
	UVM_PAGE_OWN(pg, NULL);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_KMPAGE, 0);
	pmap_update(pmap_kernel());
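
	/*
	 * resulting layout (the object is pushed as close to the guard
	 * page as alignment allows):
	 *
	 *	va               p              va+PAGE_SIZE    va+2*PAGE_SIZE
	 *	| .. [canary][object, len bytes]|- unmapped guard page -|
	 */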

	/*
	 * offset the returned pointer so that the unmapped guard page
	 * sits immediately after the returned object.
	 */

	p = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
	p[-1] = CANARY(va, len);
	return (void *)p;
}

bool
uvm_kmguard_free(struct uvm_kmguard *kg, size_t len, void *p)
{
	vaddr_t va;
	u_int rotor;
	void **c;

	if (len > MAXSIZE) {
		return false;
	}

	/*
	 * first, check that everything is as it should be.
	 */

	va = trunc_page((vaddr_t)p);
	c = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
	KASSERT(p == (void *)c);
	KASSERT(c[-1] == CANARY(va, len));
	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
	KASSERT(!pmap_extract(pmap_kernel(), va + PAGE_SIZE, NULL));
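
	/*
	 * the checks above catch, in order: a pointer or size that does
	 * not match what uvm_kmguard_alloc() returned, a canary word
	 * clobbered by an underflow, a double free (the first page must
	 * still be mapped), and a guard page that somehow became mapped.
	 */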

	/*
	 * unmap and free the first page.  the second page is never
	 * allocated.
	 */

	uvm_km_pgremove_intrsafe(kg->kg_map, va, va + PAGE_SIZE * 2);
	pmap_kremove(va, PAGE_SIZE * 2);
	pmap_update(pmap_kernel());

	/*
	 * put the VA allocation into the list and swap an old one
	 * out to free.  this behaves mostly like a fifo.
	 */
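	/*
	 * this delayed reuse is what catches use-after-free: a freed
	 * range stays unmapped while it waits in the fifo, so a stale
	 * pointer dereference faults instead of silently touching
	 * recycled memory.
	 */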

	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
	if (va != 0) {
		uvm_km_free(kg->kg_map, va, PAGE_SIZE*2, UVM_KMF_VAONLY);
	}

	return true;
}