/* minix.git: servers/vm/alloc.c — commit "VM: only single page chunks"
 * (blob 5d9e6e49784c95a3153909f8bbd8401ca781feb7)
 */
1 /* This file is concerned with allocating and freeing arbitrary-size blocks of
2 * physical memory.
3 */
5 #define _SYSTEM 1
7 #include <minix/com.h>
8 #include <minix/callnr.h>
9 #include <minix/type.h>
10 #include <minix/config.h>
11 #include <minix/const.h>
12 #include <minix/sysutil.h>
13 #include <minix/syslib.h>
14 #include <minix/debug.h>
15 #include <minix/bitmap.h>
17 #include <sys/mman.h>
19 #include <limits.h>
20 #include <string.h>
21 #include <errno.h>
22 #include <assert.h>
23 #include <memory.h>
25 #include "vm.h"
26 #include "proto.h"
27 #include "util.h"
28 #include "glo.h"
29 #include "sanitycheck.h"
30 #include "memlist.h"
32 /* Number of physical pages in a 32-bit address space */
33 #define NUMBER_PHYSICAL_PAGES (0x100000000ULL/VM_PAGE_SIZE)
34 #define PAGE_BITMAP_CHUNKS BITMAP_CHUNKS(NUMBER_PHYSICAL_PAGES)
35 static bitchunk_t free_pages_bitmap[PAGE_BITMAP_CHUNKS];
36 #define PAGE_CACHE_MAX 10000
37 static int free_page_cache[PAGE_CACHE_MAX];
38 static int free_page_cache_size = 0;
40 /* Used for sanity check. */
41 static phys_bytes mem_low, mem_high;
43 static void free_pages(phys_bytes addr, int pages);
44 static phys_bytes alloc_pages(int pages, int flags);
46 #if SANITYCHECKS
47 struct {
48 int used;
49 char *file;
50 int line;
51 } pagemap[NUMBER_PHYSICAL_PAGES];
52 #endif
54 #define page_isfree(i) GET_BIT(free_pages_bitmap, i)
56 /*===========================================================================*
57 * alloc_mem *
58 *===========================================================================*/
59 phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
61 /* Allocate a block of memory from the free list using first fit. The block
62 * consists of a sequence of contiguous bytes, whose length in clicks is
63 * given by 'clicks'. A pointer to the block is returned. The block is
64 * always on a click boundary. This procedure is called when memory is
65 * needed for FORK or EXEC.
67 phys_clicks mem = NO_MEM, align_clicks = 0;
69 if(memflags & PAF_ALIGN64K) {
70 align_clicks = (64 * 1024) / CLICK_SIZE;
71 clicks += align_clicks;
72 } else if(memflags & PAF_ALIGN16K) {
73 align_clicks = (16 * 1024) / CLICK_SIZE;
74 clicks += align_clicks;
77 mem = alloc_pages(clicks, memflags);
78 if(mem == NO_MEM) {
79 free_yielded(clicks * CLICK_SIZE);
80 mem = alloc_pages(clicks, memflags);
83 if(mem == NO_MEM)
84 return mem;
86 if(align_clicks) {
87 phys_clicks o;
88 o = mem % align_clicks;
89 if(o > 0) {
90 phys_clicks e;
91 e = align_clicks - o;
92 free_mem(mem, e);
93 mem += e;
97 return mem;
100 /*===========================================================================*
101 * free_mem *
102 *===========================================================================*/
103 void free_mem(phys_clicks base, phys_clicks clicks)
105 /* Return a block of free memory to the hole list. The parameters tell where
106 * the block starts in physical memory and how big it is. The block is added
107 * to the hole list. If it is contiguous with an existing hole on either end,
108 * it is merged with the hole or holes.
110 if (clicks == 0) return;
112 assert(CLICK_SIZE == VM_PAGE_SIZE);
113 free_pages(base, clicks);
114 return;
117 /*===========================================================================*
118 * mem_init *
119 *===========================================================================*/
120 void mem_init(chunks)
121 struct memory *chunks; /* list of free memory chunks */
123 /* Initialize hole lists. There are two lists: 'hole_head' points to a linked
124 * list of all the holes (unused memory) in the system; 'free_slots' points to
125 * a linked list of table entries that are not in use. Initially, the former
126 * list has one entry for each chunk of physical memory, and the second
127 * list links together the remaining table slots. As memory becomes more
128 * fragmented in the course of time (i.e., the initial big holes break up into
129 * smaller holes), new table slots are needed to represent them. These slots
130 * are taken from the list headed by 'free_slots'.
132 int i, first = 0;
134 total_pages = 0;
136 memset(free_pages_bitmap, 0, sizeof(free_pages_bitmap));
138 /* Use the chunks of physical memory to allocate holes. */
139 for (i=NR_MEMS-1; i>=0; i--) {
140 if (chunks[i].size > 0) {
141 phys_bytes from = CLICK2ABS(chunks[i].base),
142 to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
143 if(first || from < mem_low) mem_low = from;
144 if(first || to > mem_high) mem_high = to;
145 free_mem(chunks[i].base, chunks[i].size);
146 total_pages += chunks[i].size;
147 first = 0;
#if SANITYCHECKS
/* Verify that no free page is simultaneously recorded as used: claim every
 * free page via usedpages_add() and assert the claim succeeds.
 */
void mem_sanitycheck(char *file, int line)
{
	int i;

	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		if(!page_isfree(i)) continue;
		MYASSERT(usedpages_add(i * VM_PAGE_SIZE, VM_PAGE_SIZE) == OK);
	}
}
#endif
163 void memstats(int *nodes, int *pages, int *largest)
165 int i;
166 *nodes = 0;
167 *pages = 0;
168 *largest = 0;
170 for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
171 int size = 0;
172 while(i < NUMBER_PHYSICAL_PAGES && page_isfree(i)) {
173 size++;
174 i++;
176 if(size == 0) continue;
177 (*nodes)++;
178 (*pages)+= size;
179 if(size > *largest)
180 *largest = size;
184 static int findbit(int low, int startscan, int pages, int memflags, int *len)
186 int run_length = 0, i, freerange_start;
188 for(i = startscan; i >= low; i--) {
189 if(!page_isfree(i)) {
190 int pi;
191 int chunk = i/BITCHUNK_BITS, moved = 0;
192 run_length = 0;
193 pi = i;
194 while(chunk > 0 &&
195 !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
196 chunk--;
197 moved = 1;
199 if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
200 continue;
202 if(!run_length) { freerange_start = i; run_length = 1; }
203 else { freerange_start--; run_length++; }
204 assert(run_length <= pages);
205 if(run_length == pages) {
206 /* good block found! */
207 *len = run_length;
208 return freerange_start;
212 return NO_MEM;
215 /*===========================================================================*
216 * alloc_pages *
217 *===========================================================================*/
218 static phys_bytes alloc_pages(int pages, int memflags)
220 phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
221 phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;
222 phys_bytes mem = NO_MEM;
223 int maxpage = NUMBER_PHYSICAL_PAGES - 1, i;
224 static int lastscan = -1;
225 int startscan, run_length;
227 if(memflags & PAF_LOWER16MB)
228 maxpage = boundary16 - 1;
229 else if(memflags & PAF_LOWER1MB)
230 maxpage = boundary1 - 1;
231 else {
232 /* no position restrictions: check page cache */
233 if(pages == 1) {
234 while(free_page_cache_size > 0) {
235 i = free_page_cache[free_page_cache_size-1];
236 if(page_isfree(i)) {
237 free_page_cache_size--;
238 mem = i;
239 assert(mem != NO_MEM);
240 run_length = 1;
241 break;
243 free_page_cache_size--;
248 if(lastscan < maxpage && lastscan >= 0)
249 startscan = lastscan;
250 else startscan = maxpage;
252 if(mem == NO_MEM)
253 mem = findbit(0, startscan, pages, memflags, &run_length);
254 if(mem == NO_MEM)
255 mem = findbit(0, maxpage, pages, memflags, &run_length);
256 if(mem == NO_MEM)
257 return NO_MEM;
259 /* remember for next time */
260 lastscan = mem;
262 for(i = mem; i < mem + pages; i++) {
263 UNSET_BIT(free_pages_bitmap, i);
266 if(memflags & PAF_CLEAR) {
267 int s;
268 if ((s= sys_memset(NONE, 0, CLICK_SIZE*mem,
269 VM_PAGE_SIZE*pages)) != OK)
270 panic("alloc_mem: sys_memset failed: %d", s);
273 return mem;
276 /*===========================================================================*
277 * free_pages *
278 *===========================================================================*/
279 static void free_pages(phys_bytes pageno, int npages)
281 int i, lim = pageno + npages - 1;
283 #if JUNKFREE
284 if(sys_memset(NONE, 0xa5a5a5a5, VM_PAGE_SIZE * pageno,
285 VM_PAGE_SIZE * npages) != OK)
286 panic("free_pages: sys_memset failed");
287 #endif
289 for(i = pageno; i <= lim; i++) {
290 SET_BIT(free_pages_bitmap, i);
291 if(free_page_cache_size < PAGE_CACHE_MAX) {
292 free_page_cache[free_page_cache_size++] = i;
297 /*===========================================================================*
298 * printmemstats *
299 *===========================================================================*/
300 void printmemstats(void)
302 int nodes, pages, largest;
303 memstats(&nodes, &pages, &largest);
304 printf("%d blocks, %d pages (%lukB) free, largest %d pages (%lukB)\n",
305 nodes, pages, (unsigned long) pages * (VM_PAGE_SIZE/1024),
306 largest, (unsigned long) largest * (VM_PAGE_SIZE/1024));
310 #if SANITYCHECKS
312 /*===========================================================================*
313 * usedpages_reset *
314 *===========================================================================*/
315 void usedpages_reset(void)
317 memset(pagemap, 0, sizeof(pagemap));
320 /*===========================================================================*
321 * usedpages_add *
322 *===========================================================================*/
323 int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
325 u32_t pagestart, pages;
327 if(!incheck)
328 return OK;
330 assert(!(addr % VM_PAGE_SIZE));
331 assert(!(len % VM_PAGE_SIZE));
332 assert(len > 0);
334 pagestart = addr / VM_PAGE_SIZE;
335 pages = len / VM_PAGE_SIZE;
337 while(pages > 0) {
338 phys_bytes thisaddr;
339 assert(pagestart > 0);
340 assert(pagestart < NUMBER_PHYSICAL_PAGES);
341 thisaddr = pagestart * VM_PAGE_SIZE;
342 assert(pagestart >= 0);
343 assert(pagestart < NUMBER_PHYSICAL_PAGES);
344 if(pagemap[pagestart].used) {
345 static int warnings = 0;
346 if(warnings++ < 100)
347 printf("%s:%d: usedpages_add: addr 0x%lx reused, first %s:%d\n",
348 file, line, thisaddr, pagemap[pagestart].file, pagemap[pagestart].line);
349 util_stacktrace();
350 return EFAULT;
352 pagemap[pagestart].used = 1;
353 pagemap[pagestart].file = file;
354 pagemap[pagestart].line = line;
355 pages--;
356 pagestart++;
359 return OK;
362 #endif
364 /*===========================================================================*
365 * alloc_mem_in_list *
366 *===========================================================================*/
367 struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags, phys_bytes known)
369 phys_bytes rempages, phys_count;
370 struct memlist *head = NULL, *tail = NULL;
372 assert(bytes > 0);
373 assert(!(bytes % VM_PAGE_SIZE));
375 rempages = bytes / VM_PAGE_SIZE;
377 assert(!(flags & PAF_CONTIG));
379 if(known != MAP_NONE)
380 phys_count = known;
382 do {
383 struct memlist *ml;
384 phys_bytes mem;
385 vir_bytes freed = 0;
387 do {
388 if(known == MAP_NONE) {
389 mem = alloc_pages(1, flags);
391 if(mem == NO_MEM) {
392 freed = free_yielded(rempages * VM_PAGE_SIZE);
394 assert(mem != MAP_NONE);
395 } else {
396 mem = ABS2CLICK(phys_count);
397 phys_count += VM_PAGE_SIZE;
398 assert(mem != MAP_NONE);
399 assert(mem != NO_MEM);
401 assert(mem != MAP_NONE);
402 } while(mem == NO_MEM && freed > 0);
404 if(mem == NO_MEM) {
405 printf("alloc_mem_in_list: giving up, %lukB missing\n",
406 rempages * VM_PAGE_SIZE/1024);
407 printmemstats();
408 free_mem_list(head, 1);
409 return NULL;
412 if(!(SLABALLOC(ml))) {
413 free_mem_list(head, 1);
414 free_pages(mem, VM_PAGE_SIZE);
415 return NULL;
418 USE(ml, ml->phys = CLICK2ABS(mem); ml->next = NULL;);
419 if(tail) {
420 USE(tail,
421 tail->next = ml;);
423 tail = ml;
424 if(!head)
425 head = ml;
426 rempages--;
427 } while(rempages > 0);
430 struct memlist *ml;
431 for(ml = head; ml; ml = ml->next) {
432 assert(ml->phys);
433 #if NONCONTIGUOUS
434 if(!(flags & PAF_CONTIG)) {
435 if(ml->next)
436 assert(ml->phys + ml->length != ml->next->phys);
438 #endif
442 return head;
445 /*===========================================================================*
446 * free_mem_list *
447 *===========================================================================*/
448 void free_mem_list(struct memlist *list, int all)
450 while(list) {
451 struct memlist *next;
452 next = list->next;
453 assert(!(list->phys % VM_PAGE_SIZE));
454 if(all)
455 free_pages(list->phys / VM_PAGE_SIZE, 1);
456 SLABFREE(list);
457 list = next;
461 /*===========================================================================*
462 * print_mem_list *
463 *===========================================================================*/
464 void print_mem_list(struct memlist *list)
466 while(list) {
467 printf("0x%lx-0x%lx", list->phys, list->phys+VM_PAGE_SIZE-1);
468 printf(" ");
469 list = list->next;
471 printf("\n");