usr.sbin/acpitools/aml/aml_memman.c
/*	$NetBSD: aml_memman.c,v 1.2 2007/01/14 05:33:18 dogcow Exp $	*/

/*-
 * Copyright (c) 1999, 2000 Mitsuru IWASAKI <iwasaki@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Id: aml_memman.c,v 1.10 2000/08/09 14:47:43 iwasaki Exp
 *	$FreeBSD: src/usr.sbin/acpi/amldb/aml/aml_memman.c,v 1.2 2000/11/09 06:24:45 iwasaki Exp $
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: aml_memman.c,v 1.2 2007/01/14 05:33:18 dogcow Exp $");

/*
 * Generic Memory Management
 */

#include <sys/param.h>

#include <aml/aml_memman.h>

#ifndef _KERNEL
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#else /* _KERNEL */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
MALLOC_DEFINE(M_MEMMAN, "memman", "Generic and Simple Memory Management");
#endif /* !_KERNEL */

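/*
 * Overview: a struct memman instance (declared in <aml/aml_memman.h>,
 * which presumably also provides the MEMMAN_* helper macros used below)
 * supports two allocation schemes.  Fixed-size objects are served from
 * per-id pools ("blockman" entries): each pool carves nodes out of
 * larger blocks and keeps free and occupied node lists.  Flexible-size
 * requests go straight to the system allocator but are tracked in a
 * list so that memman_freeall() can reclaim them and
 * memman_statistics() can report on them.
 */
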
unsigned int memid_unkown = 255;

static int	manage_block(struct memman *memman, unsigned int id,
		    void *block, unsigned static_mem, unsigned entries);
static int	blockman_init(struct memman *memman, unsigned int id);
static void	memman_flexsize_add_histogram(struct memman *memman,
		    size_t size, int tolerance);
static int	memman_comp_histogram_size(const void *a, const void *b);
static void	memman_sort_histogram_by_size(struct memman *memman);
static unsigned int memman_guess_memid(struct memman *memman, void *chunk);
static void	memman_statistics_fixedsize(struct memman *memman);
static void	memman_statistics_flexsize(struct memman *memman);

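/*
 * manage_block():
 * Attach a block of "entries" fixed-size chunks to the pool selected
 * by "id".  The struct memman_block header and its node array either
 * share the caller's buffer (static_mem != 0, used for the pool's
 * initial block) or are allocated separately with MEMMAN_SYSMALLOC().
 * Every node is threaded onto the pool's free list.
 */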
static int
manage_block(struct memman *memman, unsigned int id, void *block,
    unsigned static_mem, unsigned entries)
{
	unsigned int i;
	size_t alloc_size;
	void *tmp, *realblock;
	struct memman_blockman *bmp;
	struct memman_block *memblock;
	struct memman_node *memnodes;

	bmp = &memman->blockman[id];
	alloc_size = MEMMAN_BLOCKNODE_SIZE(entries);

	if (static_mem) {
		tmp = (void *)block;
		realblock = (char *)block + alloc_size;
	} else {
		tmp = MEMMAN_SYSMALLOC(alloc_size);
		if (!tmp) {
			return (-1);
		}
		realblock = block;

		memman->allocated_mem += alloc_size;
		memman->salloc_called++;
	}

	memblock = (struct memman_block *)tmp;
	memnodes = (struct memman_node *)((char *)tmp + sizeof(struct memman_block));

	memblock->block = realblock;
	memblock->static_mem = static_mem;
	memblock->allocated = entries;
	memblock->available = entries;
	if (!static_mem) {
		alloc_size += roundup(bmp->size * entries, ROUNDUP_UNIT);
	}
	memblock->allocated_mem = alloc_size;
	LIST_INSERT_HEAD(&bmp->block_list, memblock, links);

	for (i = 0; i < entries; ++i) {
		memnodes[i].node = ((char *)realblock + (i * (bmp->size)));
		memnodes[i].memblock = memblock;
		LIST_INSERT_HEAD(&bmp->free_node_list, &memnodes[i], links);
	}
	bmp->available = entries;

	return (0);
}

static int
blockman_init(struct memman *memman, unsigned int id)
{
	int status;
	struct memman_blockman *bmp;

	bmp = &memman->blockman[id];
	bmp->initialized = 1;
	LIST_INIT(&bmp->block_list);
	LIST_INIT(&bmp->free_node_list);
	LIST_INIT(&bmp->occupied_node_list);
	status = manage_block(memman, id, bmp->initial_block,
	    1, MEMMAN_INITIAL_SIZE);
	return (status);
}

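/*
 * memman_alloc():
 * Hand out one fixed-size chunk of the memory type "id".  The pool is
 * initialized lazily on first use; when it runs dry, a block of
 * MEMMAN_INCR_SIZE further entries is allocated and attached before a
 * node is moved from the free list to the occupied list.
 */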
void *
memman_alloc(struct memman *memman, unsigned int id)
{
	size_t alloc_size;
	void *chunk, *block;
	struct memman_blockman *bmp;
	struct memman_node *memnode;

	if (memman->max_memid <= id) {
		printf("memman_alloc: invalid memory type id\n");
		return (NULL);
	}
	bmp = &memman->blockman[id];
	if (!bmp->initialized) {
		if (blockman_init(memman, id)) {
			goto malloc_fail;
		}
	}
	memman->alloc_called++;

	if (bmp->available == 0) {
		alloc_size = roundup(bmp->size * MEMMAN_INCR_SIZE,
		    ROUNDUP_UNIT);
		block = MEMMAN_SYSMALLOC(alloc_size);
		if (!block) {
			goto malloc_fail;
		}
		memman->required_mem += bmp->size * MEMMAN_INCR_SIZE;
		memman->allocated_mem += alloc_size;
		memman->salloc_called++;

		if (manage_block(memman, id, block, 0, MEMMAN_INCR_SIZE)) {
			goto malloc_fail;
		}
	}
	memnode = LIST_FIRST(&bmp->free_node_list);
	LIST_REMOVE(memnode, links);
	chunk = memnode->node;
	LIST_INSERT_HEAD(&bmp->occupied_node_list, memnode, links);
	memnode->memblock->available--;
	bmp->available--;

	return (chunk);

malloc_fail:
	printf("memman_alloc: could not allocate memory\n");
	return (NULL);
}

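/*
 * memman_flexsize_add_histogram():
 * Record a flexible-size allocation in the size histogram.  A request
 * within "tolerance" bytes of an existing bucket is counted against
 * that bucket (whose recorded size grows if needed); once all
 * MEMMAN_HISTOGRAM_SIZE buckets are taken, the call recurses with a
 * wider tolerance until the size fits somewhere.
 */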
static void
memman_flexsize_add_histogram(struct memman *memman, size_t size,
    int tolerance)
{
	unsigned int i;
	int gap;

	if (size == 0) {
		return;
	}
	for (i = 0; i < memman->flex_mem_histogram_ptr; i++) {
		gap = memman->flex_mem_histogram[i].mem_size - size;
		if (gap >= (tolerance * -1) && gap <= tolerance) {
			memman->flex_mem_histogram[i].count++;
			if (memman->flex_mem_histogram[i].mem_size < size) {
				memman->flex_mem_histogram[i].mem_size = size;
			}
			return;
		}
	}

	if (memman->flex_mem_histogram_ptr == MEMMAN_HISTOGRAM_SIZE) {
		memman_flexsize_add_histogram(memman, size, tolerance + 1);
		return;
	}
	i = memman->flex_mem_histogram_ptr;
	memman->flex_mem_histogram[i].mem_size = size;
	memman->flex_mem_histogram[i].count = 1;
	memman->flex_mem_histogram_ptr++;
}

static int
memman_comp_histogram_size(const void *a, const void *b)
{
	int delta;

	delta = ((const struct memman_histogram *)a)->mem_size -
	    ((const struct memman_histogram *)b)->mem_size;
	return (delta);
}

static void
memman_sort_histogram_by_size(struct memman *memman)
{
	qsort(memman->flex_mem_histogram, memman->flex_mem_histogram_ptr,
	    sizeof(struct memman_histogram), memman_comp_histogram_size);
}

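/*
 * memman_alloc_flexsize():
 * Allocate "size" bytes directly from the system allocator.  On
 * success the allocation is recorded in flexmem_info_list and in the
 * size histogram so that it can be reclaimed by memman_freeall() and
 * reported by memman_statistics().
 */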
void *
memman_alloc_flexsize(struct memman *memman, size_t size)
{
	void *mem;
	struct memman_flexmem_info *info;

	if (size == 0) {
		return (NULL);
	}
	if ((mem = MEMMAN_SYSMALLOC(size)) != NULL) {	/* XXX */

		info = MEMMAN_SYSMALLOC(sizeof(struct memman_flexmem_info));
		if (info) {
			if (!memman->flex_mem_initialized) {
				LIST_INIT(&memman->flexmem_info_list);
				bzero(memman->flex_mem_histogram,
				    sizeof(struct memman_histogram));
				memman->flex_mem_initialized = 1;
			}
			info->addr = mem;
			info->mem_size = size;
			LIST_INSERT_HEAD(&memman->flexmem_info_list, info, links);
		}
		memman->flex_alloc_called++;
		memman->flex_salloc_called++;
		memman->flex_required_mem += size;
		memman->flex_allocated_mem += size;
		if (memman->flex_mem_size_min == 0 ||
		    memman->flex_mem_size_min > size) {
			memman->flex_mem_size_min = size;
		}
		if (memman->flex_mem_size_max < size) {
			memman->flex_mem_size_max = size;
		}
		if (memman->flex_peak_mem_usage <
		    (memman->flex_allocated_mem - memman->flex_reclaimed_mem)) {
			memman->flex_peak_mem_usage =
			    (memman->flex_allocated_mem - memman->flex_reclaimed_mem);
		}
		memman_flexsize_add_histogram(memman, size,
		    memman->flex_mem_histogram_initial_tolerance);
	}
	return (mem);
}

static unsigned int
memman_guess_memid(struct memman *memman, void *chunk)
{
	unsigned int id;
	struct memman_blockman *bmp;
	struct memman_node *memnode;

	for (id = 0; id < memman->max_memid; id++) {
		bmp = &memman->blockman[id];
		if (!bmp->initialized) {
			if (blockman_init(memman, id)) {
				printf("memman_free: could not initialize\n");
			}
		}
		LIST_FOREACH(memnode, &bmp->occupied_node_list, links) {
			if (memnode->node == chunk) {
				return (id);	/* got it! */
			}
		}
	}
	return (memid_unkown);	/* gave up */
}

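/*
 * memman_free():
 * Return a fixed-size chunk to its pool.  A caller that does not know
 * the memory type id may pass memid_unkown, in which case the id is
 * found by scanning the occupied node lists.  When a dynamically
 * allocated block becomes entirely free, its nodes are unlinked and
 * the block is handed back to the system allocator.
 */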
void
memman_free(struct memman *memman, unsigned int memid, void *chunk)
{
	unsigned int id;
	unsigned found;
	void *block;
	struct memman_blockman *bmp;
	struct memman_block *memblock;
	struct memman_node *memnode;

	id = memid;
	if (memid == memid_unkown) {
		id = memman_guess_memid(memman, chunk);
	}
	if (memman->max_memid <= id) {
		printf("memman_free: invalid memory type id\n");
		MEMMAN_SYSABORT();
		return;
	}
	bmp = &memman->blockman[id];
	if (!bmp->initialized) {
		if (blockman_init(memman, id)) {
			printf("memman_free: could not initialize\n");
		}
	}
	found = 0;
	LIST_FOREACH(memnode, &bmp->occupied_node_list, links) {
		if (memnode->node == chunk) {
			found = 1;
			break;
		}
	}
	if (!found) {
		printf("memman_free: invalid address\n");
		return;
	}
	memman->free_called++;

	LIST_REMOVE(memnode, links);
	memblock = memnode->memblock;
	memblock->available++;
	LIST_INSERT_HEAD(&bmp->free_node_list, memnode, links);
	bmp->available++;

	if (!memblock->static_mem &&
	    memblock->available == memblock->allocated) {
		LIST_FOREACH(memnode, &bmp->free_node_list, links) {
			if (memnode->memblock != memblock) {
				continue;
			}
			LIST_REMOVE(memnode, links);
			bmp->available--;
		}
		block = memblock->block;
		MEMMAN_SYSFREE(block);
		memman->sfree_called++;

		LIST_REMOVE(memblock, links);
		memman->sfree_called++;
		memman->reclaimed_mem += memblock->allocated_mem;
		MEMMAN_SYSFREE(memblock);
	}
}

void
memman_free_flexsize(struct memman *memman, void *chunk)
{
	struct memman_flexmem_info *info;

	LIST_FOREACH(info, &memman->flexmem_info_list, links) {
		if (info->addr == chunk) {
			memman->flex_reclaimed_mem += info->mem_size;
			LIST_REMOVE(info, links);
			MEMMAN_SYSFREE(info);
			break;
		}
	}
	/* XXX */
	memman->flex_free_called++;
	memman->flex_sfree_called++;
	MEMMAN_SYSFREE(chunk);
}

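/*
 * memman_freeall():
 * Release everything the manager still tracks: any fixed-size chunks
 * left in the occupied lists (reporting each one), all remaining
 * blocks, and every flexible-size allocation still on record.
 */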
void
memman_freeall(struct memman *memman)
{
	unsigned int id;
	void *chunk;
	struct memman_blockman *bmp;
	struct memman_node *memnode;
	struct memman_block *memblock;
	struct memman_flexmem_info *info;

	for (id = 0; id < memman->max_memid; id++) {
		bmp = &memman->blockman[id];

		while ((memnode = LIST_FIRST(&bmp->occupied_node_list))) {
			chunk = memnode->node;
			printf("memman_freeall: fixed size (id = %u)\n", id);
			memman_free(memman, id, chunk);
		}
		while ((memblock = LIST_FIRST(&bmp->block_list))) {
			LIST_REMOVE(memblock, links);
			if (!memblock->static_mem) {
				memman->sfree_called++;
				memman->reclaimed_mem += memblock->allocated_mem;
				MEMMAN_SYSFREE(memblock);
			}
		}
		bmp->initialized = 0;
	}

	LIST_FOREACH(info, &memman->flexmem_info_list, links) {
		printf("memman_freeall: flex size (size = %zd, addr = %p)\n",
		    info->mem_size, info->addr);
		memman_free_flexsize(memman, info->addr);
	}
}

static void
memman_statistics_fixedsize(struct memman *memman)
{
	printf(" fixed size memory blocks\n");
	printf(" alloc(): %d times\n", memman->alloc_called);
	printf(" system malloc(): %d times\n", memman->salloc_called);
	printf(" free(): %d times\n", memman->free_called);
	printf(" system free(): %d times\n", memman->sfree_called);
	printf(" required memory: %zd bytes\n", memman->required_mem);
	printf(" allocated memory: %zd bytes\n", memman->allocated_mem);
	printf(" reclaimed memory: %zd bytes\n", memman->reclaimed_mem);
}

static void
memman_statistics_flexsize(struct memman *memman)
{
	unsigned int i;

	printf(" flexible size memory blocks\n");
	printf(" alloc(): %d times\n", memman->flex_alloc_called);
	printf(" system malloc(): %d times\n", memman->flex_salloc_called);
	printf(" free(): %d times\n", memman->flex_free_called);
	printf(" system free(): %d times\n", memman->flex_sfree_called);
	printf(" required memory: %zd bytes\n", memman->flex_required_mem);
	printf(" allocated memory: %zd bytes\n", memman->flex_allocated_mem);
	printf(" reclaimed memory: %zd bytes\n", memman->flex_reclaimed_mem);
	printf(" peak memory usage: %zd bytes\n", memman->flex_peak_mem_usage);
	printf(" min memory size: %zd bytes\n", memman->flex_mem_size_min);
	printf(" max memory size: %zd bytes\n", memman->flex_mem_size_max);
	printf(" avg memory size: %zd bytes\n",
	    (memman->flex_alloc_called) ?
	    memman->flex_allocated_mem / memman->flex_alloc_called : 0);

	printf(" memory size histogram (%d entries):\n",
	    memman->flex_mem_histogram_ptr);
	printf(" size count\n");
	memman_sort_histogram_by_size(memman);
	for (i = 0; i < memman->flex_mem_histogram_ptr; i++) {
		printf(" %zu %d\n",
		    memman->flex_mem_histogram[i].mem_size,
		    memman->flex_mem_histogram[i].count);
	}
}

void
memman_statistics(struct memman *memman)
{
	printf("memman: reporting statistics\n");
	memman_statistics_fixedsize(memman);
	memman_statistics_flexsize(memman);
}

size_t
memman_memid2size(struct memman *memman, unsigned int id)
{
	if (memman->max_memid <= id) {
		printf("memman_memid2size: invalid memory type id\n");
		return (0);
	}
	return (memman->blockman[id].size);
}

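/*
 * Example usage (a minimal sketch): it assumes a struct memman instance
 * configured by the caller, presumably through the initializers provided
 * by <aml/aml_memman.h>, and a hypothetical fixed-size memory type id
 * MY_MEMID smaller than memman->max_memid.
 *
 *	struct memman *mp = get_configured_memman();	(hypothetical helper)
 *	void *obj, *buf;
 *
 *	obj = memman_alloc(mp, MY_MEMID);
 *	buf = memman_alloc_flexsize(mp, 128);
 *	memman_free(mp, MY_MEMID, obj);
 *	memman_free_flexsize(mp, buf);
 *	memman_statistics(mp);
 *	memman_freeall(mp);
 */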