/*	$NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $	*/
/*-
 * Copyright (c) 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * A simple memory allocator for debugging.  It tries to catch:
 *
 * - Overflow, in realtime
 * - Underflow, at free
 * - Invalid pointer/size passed, at free
 */
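
/*
 * The scheme, in brief (a summary of the code below): each object is
 * backed by one mapped page followed by one unmapped guard page, so an
 * overflow runs into the guard page and faults at the moment it happens;
 * a canary word stored immediately below the object is checked at free
 * time to catch underflow and mismatched pointer/size arguments.
 */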

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_kmguard.c,v 1.1 2009/03/29 10:51:53 ad Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_kmguard.h>

#define	CANARY(va, size)	((void *)((va) ^ 0x9deeba9 ^ (size)))
#define	MAXSIZE			(PAGE_SIZE - sizeof(void *))
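
/*
 * Illustrative example of the canary (the addresses are arbitrary, not
 * taken from real output): with va = 0xc0a00000 and size = 128 (0x80),
 *
 *	CANARY(0xc0a00000, 0x80)
 *	    == (void *)(0xc0a00000 ^ 0x9deeba9 ^ 0x80)
 *	    == (void *)0xc97eeb29
 *
 * Folding both the VA and the size into the stored value makes it
 * unlikely that a stale canary from an earlier use of the same page
 * matches by accident.
 */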

void
uvm_kmguard_init(struct uvm_kmguard *kg, u_int *depth, size_t *size,
    struct vm_map *map)
{
	vaddr_t va;

	/*
	 * if not enabled, we have nothing to do.
	 */

	if (*depth == 0) {
		return;
	}
	*depth = roundup((*depth), PAGE_SIZE / sizeof(void *));

	/*
	 * allocate the fifo of recently freed VA blocks.
	 */

	va = uvm_km_alloc(kernel_map, *depth * sizeof(void *), PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (va == 0) {
		*depth = 0;
		*size = 0;
	} else {
		*size = MAXSIZE;
	}

	/*
	 * initialize the guard object.
	 */

	kg->kg_map = map;
	kg->kg_fifo = (void *)va;
	kg->kg_depth = *depth;
	kg->kg_rotor = 0;

94 printf("uvm_kmguard(%p): depth %d\n", kg
, *depth
);
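
/*
 * Hypothetical caller sketch (the "my_" names are illustrative, not part
 * of this file; the map argument would be a kernel submap such as
 * kmem_map):
 *
 *	static struct uvm_kmguard my_guard;
 *	u_int my_depth = 512;
 *	size_t my_maxsize;
 *
 *	uvm_kmguard_init(&my_guard, &my_depth, &my_maxsize, kmem_map);
 *
 * Afterwards the caller routes allocations of up to my_maxsize bytes
 * through uvm_kmguard_alloc()/uvm_kmguard_free(), falling back to its
 * normal path when they return NULL/false.
 */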

void *
uvm_kmguard_alloc(struct uvm_kmguard *kg, size_t len, bool waitok)
{
	struct vm_page *pg;
	void **p;
	vaddr_t va;
	int flag;

	/*
	 * can't handle >PAGE_SIZE allocations.  let the caller handle it
	 * normally.
	 */

	if (len > MAXSIZE) {
		return NULL;
	}

	/*
	 * allocate two pages of kernel VA, but do not map anything in yet.
	 */
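
	/*
	 * The intended layout (a sketch; the object fits in one page,
	 * which the MAXSIZE check above guarantees):
	 *
	 *	va                  va+PAGE_SIZE           va+PAGE_SIZE*2
	 *	|<----- mapped ----->|<----- unmapped guard ----->|
	 *	| ... canary | object|  any access here faults    |
	 */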

	if (waitok) {
		flag = UVM_KMF_WAITVA;
	} else {
		flag = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
	}
	va = vm_map_min(kg->kg_map);
	if (__predict_false(uvm_map(kg->kg_map, &va, PAGE_SIZE*2, NULL,
	    UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_ALL,
	    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flag
	    | UVM_FLAG_QUANTUM)) != 0)) {
		return NULL;
	}

	/*
	 * allocate a single page and map in at the start of the two page
	 * block.
	 */

	for (;;) {
		pg = uvm_pagealloc(NULL, va - vm_map_min(kg->kg_map),
		    NULL, 0);
		if (__predict_true(pg != NULL)) {
			break;
		}
		if (waitok) {
			uvm_wait("kmguard");	/* sleep here */
			continue;
		} else {
			uvm_km_free(kg->kg_map, va, PAGE_SIZE*2,
			    UVM_KMF_VAONLY);
			return NULL;
		}
	}

	pg->flags &= ~PG_BUSY;	/* new page */
	UVM_PAGE_OWN(pg, NULL);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_KMPAGE, 0);
	pmap_update(pmap_kernel());

	/*
	 * offset the returned pointer so that the unmapped guard page
	 * sits immediately after the returned object.
	 */
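
	/*
	 * Worked example (illustrative numbers only): with PAGE_SIZE =
	 * 4096, len = 100 and ALIGNBYTES = 7, the object is placed at
	 * (va + 4096 - 100) & ~7 == va + 3992, so it ends at most
	 * ALIGNBYTES bytes short of the guard page, and the canary
	 * occupies the pointer-sized word just below it at p[-1].
	 */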

	p = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
	p[-1] = CANARY(va, len);
	return (void *)p;
}

bool
uvm_kmguard_free(struct uvm_kmguard *kg, size_t len, void *p)
{
	vaddr_t va;
	u_int rotor;
	void **c;

	if (len > MAXSIZE) {
		return false;
	}

	/*
	 * first, check that everything is as it should be.
	 */

	va = trunc_page((vaddr_t)p);
	c = (void **)((va + PAGE_SIZE - len) & ~(uintptr_t)ALIGNBYTES);
	KASSERT(p == (void *)c);		/* pointer/size must match */
	KASSERT(c[-1] == CANARY(va, len));	/* canary intact: no underflow */
	/* object page must be mapped; the guard page must not be */
	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
	KASSERT(!pmap_extract(pmap_kernel(), va + PAGE_SIZE, NULL));

	/*
	 * unmap and free the first page.  the second page is never
	 * allocated.
	 */

	uvm_km_pgremove_intrsafe(kg->kg_map, va, va + PAGE_SIZE * 2);
	pmap_kremove(va, PAGE_SIZE * 2);
	pmap_update(pmap_kernel());

	/*
	 * put the VA allocation into the list and swap an old one
	 * out to free.  this behaves mostly like a fifo.
	 */

	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
	if (__predict_true(va != 0)) {
		uvm_km_free(kg->kg_map, va, PAGE_SIZE*2, UVM_KMF_VAONLY);
	}

	return true;
}
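
/*
 * Illustrative note on the quarantine: with kg_depth == 4, successive
 * frees store their VA blocks in fifo slots 1, 2, 3, 0, 1, ... and each
 * store evicts (and only then truly frees) the block quarantined four
 * frees earlier.  A use-after-free within that window hits unmapped VA
 * and faults instead of silently reusing memory.
 */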