vm: allow split of mem_anon_contig region
[minix3.git] / minix / servers / vm / alloc.c
/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory.
 */

#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <memory.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "sanitycheck.h"
#include "memlist.h"

/* Number of physical pages in a 32-bit address space */
#define NUMBER_PHYSICAL_PAGES (int)(0x100000000ULL/VM_PAGE_SIZE)
#define PAGE_BITMAP_CHUNKS BITMAP_CHUNKS(NUMBER_PHYSICAL_PAGES)
static bitchunk_t free_pages_bitmap[PAGE_BITMAP_CHUNKS];
#define PAGE_CACHE_MAX 10000
static int free_page_cache[PAGE_CACHE_MAX];
static int free_page_cache_size = 0;

/* Used for sanity check. */
static phys_bytes mem_low, mem_high;

static void free_pages(phys_bytes addr, int pages);
static phys_bytes alloc_pages(int pages, int flags);

#if SANITYCHECKS
struct {
	int used;
	const char *file;
	int line;
} pagemap[NUMBER_PHYSICAL_PAGES];
#endif
#define page_isfree(i) GET_BIT(free_pages_bitmap, i)

#define RESERVEDMAGIC 0x6e4c74d5
#define MAXRESERVEDPAGES 300
#define MAXRESERVEDQUEUES 15

static struct reserved_pages {
	struct reserved_pages *next;	/* next queue in use */
	int max_available;	/* configured queue depth; 0 if this queue is unused */
	int npages;		/* number of consecutive pages per slot */
	int mappedin;		/* must reserved pages also be mapped in? */
	int n_available;	/* number of slots currently filled */
	int allocflags;		/* allocflags passed to alloc_mem */
	struct reserved_pageslot {
		phys_bytes phys;
		void *vir;
	} slots[MAXRESERVEDPAGES];
	u32_t magic;
} reservedqueues[MAXRESERVEDQUEUES], *first_reserved_inuse = NULL;

int missing_spares = 0;
static void sanitycheck_queues(void)
{
	struct reserved_pages *mrq;
	int m = 0;

	for(mrq = first_reserved_inuse; mrq; mrq = mrq->next) {
		assert(mrq->max_available > 0);
		assert(mrq->max_available >= mrq->n_available);
		m += mrq->max_available - mrq->n_available;
	}

	assert(m == missing_spares);
}

static void sanitycheck_rq(struct reserved_pages *rq)
{
	assert(rq->magic == RESERVEDMAGIC);
	assert(rq->n_available >= 0);
	assert(rq->n_available <= MAXRESERVEDPAGES);
	assert(rq->n_available <= rq->max_available);

	sanitycheck_queues();
}
void *reservedqueue_new(int max_available, int npages, int mapped, int allocflags)
{
	int r;
	struct reserved_pages *rq;

	assert(max_available > 0);
	assert(max_available < MAXRESERVEDPAGES);
	assert(npages > 0);
	assert(npages < 10);

	for(r = 0; r < MAXRESERVEDQUEUES; r++)
		if(!reservedqueues[r].max_available)
			break;

	if(r >= MAXRESERVEDQUEUES) {
		printf("VM: %d reserved queues in use\n", MAXRESERVEDQUEUES);
		return NULL;
	}

	rq = &reservedqueues[r];

	memset(rq, 0, sizeof(*rq));
	rq->next = first_reserved_inuse;
	first_reserved_inuse = rq;

	rq->max_available = max_available;
	rq->npages = npages;
	rq->mappedin = mapped;
	rq->allocflags = allocflags;
	rq->magic = RESERVEDMAGIC;

	missing_spares += max_available;

	return rq;
}
static void
reservedqueue_fillslot(struct reserved_pages *rq,
	struct reserved_pageslot *rps, phys_bytes ph, void *vir)
{
	rps->phys = ph;
	rps->vir = vir;
	assert(missing_spares > 0);
	if(rq->mappedin) assert(vir);
	missing_spares--;
	rq->n_available++;
}
static int
reservedqueue_addslot(struct reserved_pages *rq)
{
	phys_bytes cl, cl_addr;
	void *vir;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
		return ENOMEM;

	cl_addr = CLICK2ABS(cl);

	vir = NULL;

	if(rq->mappedin) {
		if(!(vir = vm_mappages(cl_addr, rq->npages))) {
			free_mem(cl, rq->npages);
			printf("reservedqueue_addslot: vm_mappages failed\n");
			return ENOMEM;
		}
	}

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, cl_addr, vir);

	return OK;
}
void reservedqueue_add(void *rq_v, void *vir, phys_bytes ph)
{
	struct reserved_pages *rq = rq_v;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, ph, vir);
}
static int reservedqueue_fill(void *rq_v)
{
	struct reserved_pages *rq = rq_v;
	int r;

	sanitycheck_rq(rq);

	while(rq->n_available < rq->max_available)
		if((r=reservedqueue_addslot(rq)) != OK)
			return r;

	return OK;
}
int
reservedqueue_alloc(void *rq_v, phys_bytes *ph, void **vir)
{
	struct reserved_pages *rq = rq_v;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if(rq->n_available < 1) return ENOMEM;

	rq->n_available--;
	missing_spares++;
	rps = &rq->slots[rq->n_available];

	*ph = rps->phys;
	*vir = rps->vir;

	sanitycheck_rq(rq);

	return OK;
}
void alloc_cycle(void)
{
	struct reserved_pages *rq;
	sanitycheck_queues();
	for(rq = first_reserved_inuse; rq && missing_spares > 0; rq = rq->next) {
		sanitycheck_rq(rq);
		reservedqueue_fill(rq);
		sanitycheck_rq(rq);
	}
	sanitycheck_queues();
}
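
/*
 * Illustrative usage sketch of the reserved-queue API above (not part of
 * this file).  The parameters chosen here, a queue of 4 slots holding 1
 * mapped page each and allocated with PAF_CLEAR, are hypothetical and not
 * taken from a real caller.
 *
 *	void *rq;
 *	phys_bytes ph;
 *	void *vir;
 *
 *	if(!(rq = reservedqueue_new(4, 1, 1, PAF_CLEAR)))
 *		panic("no reserved queue");
 *
 * Later, from a context that must not fail or block on a fresh allocation:
 *
 *	if(reservedqueue_alloc(rq, &ph, &vir) != OK)
 *		return ENOMEM;
 *
 * alloc_cycle(), presumably run from VM's main loop, then refills the queue
 * through reservedqueue_fill() whenever missing_spares is nonzero.
 */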
/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'.  A pointer to the block is returned.  The block is
 * always on a click boundary.  This procedure is called when memory is
 * needed for FORK or EXEC.
 */
	phys_clicks mem = NO_MEM, align_clicks = 0;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	} else if(memflags & PAF_ALIGN16K) {
		align_clicks = (16 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}

	do {
		mem = alloc_pages(clicks, memflags);
	} while(mem == NO_MEM && cache_freepages(clicks) > 0);

	if(mem == NO_MEM)
		return mem;

	if(align_clicks) {
		phys_clicks o;
		o = mem % align_clicks;
		if(o > 0) {
			phys_clicks e;
			e = align_clicks - o;
			free_mem(mem, e);
			mem += e;
		}
	}

	return mem;
}
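
/*
 * Worked example of the alignment handling above (illustrative numbers,
 * assuming CLICK_SIZE == 4096): a request for 4 clicks with PAF_ALIGN64K
 * sets align_clicks = 65536/4096 = 16 and asks alloc_pages() for 4+16 = 20
 * clicks.  If the block starts at click 1003, the misalignment is
 * o = 1003 % 16 = 11, so e = 16 - 11 = 5 leading clicks are given back with
 * free_mem(1003, 5) and the caller receives click 1008, which is
 * 64kB-aligned (1008 % 16 == 0).
 */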
void mem_add_total_pages(int pages)
{
	total_pages += pages;
}

/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of memory to the free-pages bitmap. The parameters tell
 * where the block starts in physical memory and how big it is, both in
 * clicks.  Since CLICK_SIZE equals VM_PAGE_SIZE here, clicks map one-to-one
 * onto pages.
 */
	if (clicks == 0) return;

	assert(CLICK_SIZE == VM_PAGE_SIZE);
	free_pages(base, clicks);
	return;
}
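
/*
 * Illustrative note on units (not part of this file): alloc_mem() and
 * free_mem() both work in clicks, and the assert above pins one click to
 * one page.  A hypothetical caller wanting two zeroed pages might do:
 *
 *	phys_clicks cl;
 *	phys_bytes addr;
 *
 *	if((cl = alloc_mem(2, PAF_CLEAR)) == NO_MEM)
 *		return ENOMEM;
 *	addr = CLICK2ABS(cl);		byte address of the block
 *	...
 *	free_mem(cl, 2);		return both pages
 */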
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
void mem_init(struct memory *chunks)
{
/* Initialize the free-pages bitmap from the chunks of physical memory that
 * were handed to VM.  Every page of every nonempty chunk is marked free, the
 * chunk sizes are accumulated into total_pages, and mem_low/mem_high are set
 * to the lowest and highest physical addresses seen, for use by the sanity
 * checks.
 */
	int i, first = 1;

	total_pages = 0;

	memset(free_pages_bitmap, 0, sizeof(free_pages_bitmap));

	/* Mark the chunks of physical memory as free. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			free_mem(chunks[i].base, chunks[i].size);
			total_pages += chunks[i].size;
			first = 0;
		}
	}
}
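
/*
 * Illustrative sketch of how mem_init() consumes its argument (not part of
 * this file).  The single-chunk layout below is hypothetical; in reality the
 * array comes from the kernel via VM's startup code.  Assuming 4kB clicks,
 * base 0x100 and size 0x7f00 describe the physical range 1MB..128MB.
 *
 *	struct memory chunks[NR_MEMS];
 *
 *	memset(chunks, 0, sizeof(chunks));	entries with size 0 are skipped
 *	chunks[0].base = 0x100;
 *	chunks[0].size = 0x7f00;
 *	mem_init(chunks);
 *
 * Afterwards every page of the chunk is set in free_pages_bitmap and
 * total_pages equals the sum of all chunk sizes.
 */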
#if SANITYCHECKS
void mem_sanitycheck(const char *file, int line)
{
	int i;
	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		if(!page_isfree(i)) continue;
		MYASSERT(usedpages_add(i * VM_PAGE_SIZE, VM_PAGE_SIZE) == OK);
	}
}
#endif
void memstats(int *nodes, int *pages, int *largest)
{
	int i;
	*nodes = 0;
	*pages = 0;
	*largest = 0;

	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		int size = 0;
		while(i < NUMBER_PHYSICAL_PAGES && page_isfree(i)) {
			size++;
			i++;
		}
		if(size == 0) continue;
		(*nodes)++;
		(*pages) += size;
		if(size > *largest)
			*largest = size;
	}
}
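
/*
 * Worked example for memstats() (illustrative only): if pages 10..12 and
 * 20..25 are the only free pages, the scan finds two runs, so *nodes = 2,
 * *pages = 3 + 6 = 9 and *largest = 6.
 */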
static int findbit(int low, int startscan, int pages, int memflags, int *len)
{
	int run_length = 0, i;
	int freerange_start = startscan;

	for(i = startscan; i >= low; i--) {
		if(!page_isfree(i)) {
			int pi;
			int chunk = i/BITCHUNK_BITS, moved = 0;
			run_length = 0;
			pi = i;
			while(chunk > 0 &&
			   !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
				chunk--;
				moved = 1;
			}
			if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
			continue;
		}
		if(!run_length) { freerange_start = i; run_length = 1; }
		else { freerange_start--; run_length++; }
		assert(run_length <= pages);
		if(run_length == pages) {
			/* good block found! */
			*len = run_length;
			return freerange_start;
		}
	}

	return NO_MEM;
}
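
/*
 * Illustrative trace of findbit() (not part of this file): the scan walks
 * downwards from 'startscan'.  Suppose pages 6..9 are free and a run of
 * pages = 3 is wanted, starting at page 9.  The run grows at i = 9
 * (freerange_start = 9), i = 8 (freerange_start = 8) and i = 7
 * (freerange_start = 7), where run_length reaches 3 and page 7 is returned;
 * a run is always reported by its lowest page number.  When a used page is
 * hit, the MAP_CHUNK loop skips whole bitmap chunks that contain no free
 * page at all rather than testing them bit by bit.
 */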
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
static phys_bytes alloc_pages(int pages, int memflags)
{
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes mem = NO_MEM, i; /* page number */
	int maxpage = NUMBER_PHYSICAL_PAGES - 1;
	static int lastscan = -1;
	int startscan, run_length;

	if(memflags & PAF_LOWER16MB)
		maxpage = boundary16 - 1;
	else if(memflags & PAF_LOWER1MB)
		maxpage = boundary1 - 1;
	else {
		/* no position restrictions: check page cache */
		if(pages == 1) {
			while(free_page_cache_size > 0) {
				i = free_page_cache[free_page_cache_size-1];
				if(page_isfree(i)) {
					free_page_cache_size--;
					mem = i;
					assert(mem != NO_MEM);
					run_length = 1;
					break;
				}
				free_page_cache_size--;
			}
		}
	}

	if(lastscan < maxpage && lastscan >= 0)
		startscan = lastscan;
	else	startscan = maxpage;

	if(mem == NO_MEM)
		mem = findbit(0, startscan, pages, memflags, &run_length);
	if(mem == NO_MEM)
		mem = findbit(0, maxpage, pages, memflags, &run_length);
	if(mem == NO_MEM)
		return NO_MEM;

	/* remember for next time */
	lastscan = mem;

	for(i = mem; i < mem + pages; i++) {
		UNSET_BIT(free_pages_bitmap, i);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s= sys_memset(NONE, 0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_mem: sys_memset failed: %d", s);
	}

	return mem;
}
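
/*
 * Illustrative notes on the strategy above (not part of this file).
 * PAF_LOWER16MB and PAF_LOWER1MB only clamp maxpage, so a hypothetical
 * request such as
 *
 *	phys_clicks cl = alloc_mem(4, PAF_LOWER16MB | PAF_CLEAR);
 *
 * is satisfied entirely below the 16MB boundary and handed back zeroed by
 * sys_memset().  Unrestricted single-page requests try the free_page_cache
 * fast path first; otherwise the bitmap is scanned downwards from lastscan
 * and, if that fails, once more from maxpage before giving up.
 */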
/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
static void free_pages(phys_bytes pageno, int npages)
{
	int i, lim = pageno + npages - 1;

#if JUNKFREE
	if(sys_memset(NONE, 0xa5a5a5a5, VM_PAGE_SIZE * pageno,
		VM_PAGE_SIZE * npages) != OK)
			panic("free_pages: sys_memset failed");
#endif

	for(i = pageno; i <= lim; i++) {
		SET_BIT(free_pages_bitmap, i);
		if(free_page_cache_size < PAGE_CACHE_MAX) {
			free_page_cache[free_page_cache_size++] = i;
		}
	}
}
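
/*
 * Illustrative trace of the free-page cache (not part of this file):
 * free_pages(100, 2) sets the bits for pages 100 and 101 and pushes both
 * page numbers onto free_page_cache.  A later single-page alloc_pages()
 * call pops 101 off the cache and, because the cache is only a hint,
 * re-checks page_isfree(101) first; stale entries whose page was handed out
 * again through the bitmap scan are simply popped and discarded.
 */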
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;
	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%lukB) free, largest %d pages (%lukB)\n",
		nodes, pages, (unsigned long) pages * (VM_PAGE_SIZE/1024),
		largest, (unsigned long) largest * (VM_PAGE_SIZE/1024));
}
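
/*
 * Example of the line printed above (illustrative numbers, assuming 4kB
 * pages): with 3 free runs totalling 1024 pages, the largest being 512
 * pages, the output reads
 *
 *	3 blocks, 1024 pages (4096kB) free, largest 512 pages (2048kB)
 */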
#if SANITYCHECKS

/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}

/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, const char *file, int line)
{
	u32_t pagestart, pages;

	if(!incheck)
		return OK;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));
	assert(len > 0);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < NUMBER_PHYSICAL_PAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		assert(pagestart < NUMBER_PHYSICAL_PAGES);
		if(pagemap[pagestart].used) {
			static int warnings = 0;
			if(warnings++ < 100)
				printf("%s:%d: usedpages_add: addr 0x%lx reused, first %s:%d\n",
					file, line, thisaddr, pagemap[pagestart].file, pagemap[pagestart].line);
			util_stacktrace();
			return EFAULT;
		}
		pagemap[pagestart].used = 1;
		pagemap[pagestart].file = file;
		pagemap[pagestart].line = line;
		pages--;
		pagestart++;
	}

	return OK;
}

#endif
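
/*
 * Illustrative sketch of how the SANITYCHECKS accounting above catches a
 * double-accounted page (not part of this file).  It assumes 'incheck' is
 * set, as it is while a sanity check pass runs; usedpages_add(), used in
 * mem_sanitycheck() above, is presumably a wrapper macro elsewhere that
 * passes __FILE__ and __LINE__ to usedpages_add_f(), but that is an
 * assumption here.
 *
 *	usedpages_reset();
 *	usedpages_add_f(0x1000, VM_PAGE_SIZE, "a.c", 1);   returns OK
 *	usedpages_add_f(0x1000, VM_PAGE_SIZE, "b.c", 2);   returns EFAULT and
 *							   prints a "reused"
 *							   warning naming a.c:1
 */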