secondary cache feature in vm.
[minix.git] / servers / vm / alloc.c
blob 2aa513973861547a2397c625f8180d819edbcc8c
/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls. The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address. The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM). During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:	allocate a chunk of memory of a given size
 *   free_mem:	release a previously allocated chunk of memory
 *   mem_init:	initialize the tables when PM starts up
 */
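/* A worked example of the invariant above (illustrative values only; holes
 * are measured in clicks of CLICK_SIZE bytes): with free regions at clicks
 * [20,30) and [50,80), the sorted list is
 *
 *	hole_head -> { h_base 20, h_len 10 } -> { h_base 50, h_len 30 } -> NIL
 *
 * alloc_mem(10, 0) uses first fit, so it returns 20 and deletes the first
 * hole; a later free_mem(20, 10) re-inserts it and merge() coalesces it
 * with any abutting neighbour.
 */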
#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <memory.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "pagerange.h"
#include "addravl.h"
#include "sanitycheck.h"
#include "memlist.h"
/* AVL tree of free pages. */
addr_avl addravl;

/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;

/* Wrapped in do { } while(0) so the two asserts behave as one statement. */
#define assert_range(addr, len) do { \
	assert((addr) >= mem_low); \
	assert((addr) + (len) - 1 <= mem_high); \
} while(0)
struct hole {
	struct hole *h_next;	/* pointer to next entry on the list */
	phys_clicks h_base;	/* where does the hole begin? */
	phys_clicks h_len;	/* how big is the hole? */
	int freelist;		/* sanity check: linked on free_slots? */
	int holelist;		/* sanity check: linked on hole_head? */
};

static int startpages;

#define NIL_HOLE (struct hole *) 0

#define _NR_HOLES (_NR_PROCS*2)	/* No. of memory holes maintained by VM */

PRIVATE struct hole hole[_NR_HOLES];

PRIVATE struct hole *hole_head;	/* pointer to first hole */
PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */
FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags,
	phys_bytes *ret));

#if SANITYCHECKS
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)

#define PAGESPERGB (1024*1024*1024/VM_PAGE_SIZE) /* 1GB of memory */
#define MAXPAGES (2*PAGESPERGB)
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];

#else
#define CHECKHOLES
#endif
#if SANITYCHECKS

/*===========================================================================*
 *				holes_sanity_f				     *
 *===========================================================================*/
PRIVATE void holes_sanity_f(file, line)
char *file;
int line;
{
#define myassert(c) { \
	if(!(c)) { \
		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
		util_stacktrace(); \
		panic("assert failed"); } \
	}

	int h, c = 0, n = 0;
	struct hole *hp;

	/* Reset flags. */
	for(h = 0; h < _NR_HOLES; h++) {
		hole[h].freelist = 0;
		hole[h].holelist = 0;
	}

	/* Mark all holes on the free list. */
	for(hp = free_slots; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->freelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Mark all holes on the hole list. */
	c = 0;
	for(hp = hole_head; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->holelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Check that there are exactly the right number of nodes. */
	myassert(n == _NR_HOLES);

	/* Make sure each slot is on exactly one of the lists. */
	c = 0;
	for(h = 0; h < _NR_HOLES; h++) {
		hp = &hole[h];
		myassert(hp->holelist || hp->freelist);
		myassert(!(hp->holelist && hp->freelist));
		myassert(c < _NR_HOLES);
		c++;
	}

	/* Make sure no holes overlap. */
	for(hp = hole_head; hp && hp->h_next; hp = hp->h_next) {
		myassert(hp->holelist);
		/* No holes overlap. */
		myassert(hp->h_base + hp->h_len <= hp->h_next->h_base);

		/* No uncoalesced holes. */
		myassert(hp->h_base + hp->h_len < hp->h_next->h_base);
	}
}
#endif
/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'. A pointer to the block is returned. The block is
 * always on a click boundary. This procedure is called when memory is
 * needed for FORK or EXEC.
 */
	register struct hole *hp, *prev_ptr;
	phys_clicks old_base, mem = NO_MEM, align_clicks = 0;
	int s;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}

	if(vm_paged) {
		mem = alloc_pages(clicks, memflags, NULL);
		if(mem == NO_MEM) {
			free_yielded(clicks * CLICK_SIZE);
			mem = alloc_pages(clicks, memflags, NULL);
		}
	} else {
		CHECKHOLES;
		prev_ptr = NIL_HOLE;
		hp = hole_head;
		while (hp != NIL_HOLE) {
			if (hp->h_len >= clicks) {
				/* We found a hole that is big enough. Use it. */
				old_base = hp->h_base;	/* remember where it started */
				hp->h_base += clicks;	/* bite a piece off */
				hp->h_len -= clicks;	/* ditto */

				/* Delete the hole if used up completely. */
				if (hp->h_len == 0) del_slot(prev_ptr, hp);

				/* Anything special needs to happen? */
				if(memflags & PAF_CLEAR) {
					if ((s= sys_memset(0, CLICK_SIZE*old_base,
						CLICK_SIZE*clicks)) != OK) {
						panic("alloc_mem: sys_memset failed: %d", s);
					}
				}

				/* Return the start address of the acquired block. */
				CHECKHOLES;
				mem = old_base;
				break;
			}

			prev_ptr = hp;
			hp = hp->h_next;
		}
	}

	if(mem == NO_MEM)
		return mem;

	CHECKHOLES;

	if(align_clicks) {
		phys_clicks o;
		o = mem % align_clicks;
		if(o > 0) {
			phys_clicks e;
			e = align_clicks - o;
			free_mem(mem, e);
			mem += e;
		}
	}
	CHECKHOLES;

	return mem;
}
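/* The PAF_ALIGN64K path above over-allocates by 64kB worth of clicks and
 * then returns the misaligned prefix with free_mem(). A worked example with
 * illustrative numbers, assuming CLICK_SIZE == 4096 (so align_clicks == 16):
 * a request for 8 clicks becomes one for 24; if first fit returns base 10,
 * then o = 10 % 16 = 10 and e = 6, so clicks 10..15 are freed again and the
 * caller gets click 16, which is 64kB-aligned.
 */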
/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
PUBLIC void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
 * the block starts in physical memory and how big it is. The block is added
 * to the hole list. If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
	register struct hole *hp, *new_ptr, *prev_ptr;
	CHECKHOLES;

	if (clicks == 0) return;

	if(vm_paged) {
		assert(CLICK_SIZE == VM_PAGE_SIZE);
		free_pages(base, clicks);
		return;
	}

	if ( (new_ptr = free_slots) == NIL_HOLE)
		panic("hole table full");
	new_ptr->h_base = base;
	new_ptr->h_len = clicks;
	free_slots = new_ptr->h_next;
	hp = hole_head;

	/* If this block's address is numerically less than the lowest hole
	 * currently available, or if no holes are currently available, put
	 * this hole on the front of the hole list.
	 */
	if (hp == NIL_HOLE || base <= hp->h_base) {
		/* Block to be freed goes on front of the hole list. */
		new_ptr->h_next = hp;
		hole_head = new_ptr;
		merge(new_ptr);
		CHECKHOLES;
		return;
	}

	/* Block to be returned does not go on front of hole list. */
	prev_ptr = NIL_HOLE;
	while (hp != NIL_HOLE && base > hp->h_base) {
		prev_ptr = hp;
		hp = hp->h_next;
	}

	/* We found where it goes. Insert block after 'prev_ptr'. */
	new_ptr->h_next = prev_ptr->h_next;
	prev_ptr->h_next = new_ptr;
	merge(prev_ptr);	/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
	CHECKHOLES;
}
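/* A coalescing example (illustrative values only): with holes [20,30) and
 * [50,80), calling free_mem(30, 20) inserts [30,50) after [20,30); merge()
 * then absorbs it into its predecessor and absorbs [50,80) in turn, leaving
 * the single hole { h_base 20, h_len 60 }.
 */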
/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
/* pointer to hole entry just ahead of 'hp' */
register struct hole *prev_ptr;
/* pointer to hole entry to be removed */
register struct hole *hp;
{
/* Remove an entry from the hole list. This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
	if (hp == hole_head)
		hole_head = hp->h_next;
	else
		prev_ptr->h_next = hp->h_next;

	hp->h_next = free_slots;
	hp->h_base = hp->h_len = 0;
	free_slots = hp;
}
/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found. Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends. The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
	register struct hole *next_ptr;

	/* If 'hp' points to the last hole, no merging is possible. If it does
	 * not, try to absorb its successor into it and free the successor's
	 * table entry.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
		del_slot(hp, next_ptr);
	} else {
		hp = next_ptr;
	}

	/* If 'hp' now points to the last hole, return; otherwise, try to
	 * absorb its successor into it.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;
		del_slot(hp, next_ptr);
	}
}
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks)
struct memory *chunks;		/* list of free memory chunks */
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use. Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots. As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them. These slots
 * are taken from the list headed by 'free_slots'.
 */
	int i, first = 1;
	register struct hole *hp;

	/* Put all holes on the free list. */
	for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
		hp->h_next = hp + 1;
		hp->h_base = hp->h_len = 0;
	}
	hole[_NR_HOLES-1].h_next = NIL_HOLE;
	hole_head = NIL_HOLE;
	free_slots = &hole[0];

	addr_init(&addravl);

	total_pages = 0;

	/* Use the chunks of physical memory to allocate holes. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			free_mem(chunks[i].base, chunks[i].size);
			total_pages += chunks[i].size;
			first = 0;
		}
	}

	CHECKHOLES;
}
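/* A boot-time usage sketch (hedged: the chunk-gathering step lives elsewhere
 * in VM and is assumed here for illustration):
 *
 *	struct memory mem_chunks[NR_MEMS];
 *	get_mem_chunks(mem_chunks);
 *	mem_init(mem_chunks);
 *
 * Afterwards every chunk is represented either as a hole on 'hole_head' or,
 * when vm_paged is set, as pagerange_t nodes in 'addravl' via free_pages().
 */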
#if SANITYCHECKS
PRIVATE void sanitycheck(void)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		assert(p->size > 0);
		if(prevp) {
			assert(prevp->addr < p->addr);
			assert(prevp->addr + prevp->size < p->addr);
		}
		addr_incr_iter(&iter);
		prevp = p;
	}
}
#endif
PUBLIC void memstats(int *nodes, int *pages, int *largest)
{
	pagerange_t *p;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	*nodes = 0;
	*pages = 0;
	*largest = 0;
#if SANITYCHECKS
	sanitycheck();
#endif
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		(*nodes)++;
		(*pages)+= p->size;
		if(p->size > *largest)
			*largest = p->size;
		addr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
PRIVATE phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
{
	addr_iter iter;
	pagerange_t *pr;
	int incr;
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes mem;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages;
	int largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();
	wantnodes = firstnodes;
	wantpages = firstpages - pages;
#endif

	if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB)) {
		addr_start_iter_least(&addravl, &iter);
		incr = 1;
	} else {
		addr_start_iter_greatest(&addravl, &iter);
		incr = 0;
	}

	while((pr = addr_get_iter(&iter))) {
		SLABSANE(pr);
		assert(pr->size > 0);
		if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
			if(memflags & PAF_LOWER16MB) {
				if(pr->addr + pages > boundary16)
					return NO_MEM;
			}

			if(memflags & PAF_LOWER1MB) {
				if(pr->addr + pages > boundary1)
					return NO_MEM;
			}

			/* good block found! */
			break;
		}
		if(incr)
			addr_incr_iter(&iter);
		else
			addr_decr_iter(&iter);
	}

	if(!pr) {
		if(len)
			*len = 0;
#if SANITYCHECKS
		assert(largest < pages);
#endif
		return NO_MEM;
	}

	SLABSANE(pr);

	if(memflags & PAF_FIRSTBLOCK) {
		assert(len);
		/* The block doesn't have to be as big as requested;
		 * return its actual size though.
		 */
		if(pr->size < pages) {
			pages = pr->size;
#if SANITYCHECKS
			wantpages = firstpages - pages;
#endif
		}
	}

	if(len)
		*len = pages;

	/* Allocated chunk is off the end. */
	mem = pr->addr + pr->size - pages;

	assert(pr->size >= pages);
	if(pr->size == pages) {
		pagerange_t *prr;
		prr = addr_remove(&addravl, pr->addr);
		assert(prr);
		assert(prr == pr);
		SLABFREE(pr);
#if SANITYCHECKS
		wantnodes--;
#endif
	} else {
		USE(pr, pr->size -= pages;);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s= sys_memset(0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_pages: sys_memset failed: %d", s);
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);
#endif

	return mem;
}
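/* Callers pick the search direction through the flags above: ordinary
 * allocations scan from the highest address down, while PAF_LOWER16MB and
 * PAF_LOWER1MB scan upward from the lowest address so ISA-DMA-style buffers
 * land below the boundary. A hypothetical in-file caller wanting four zeroed
 * pages below 16MB (illustrative, not from this file):
 *
 *	phys_bytes base = alloc_pages(4, PAF_LOWER16MB | PAF_CLEAR, NULL);
 *	if (base == NO_MEM)
 *		panic("no low memory for DMA buffer");
 *
 * Note that 'base' is a page number, not a byte address.
 */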
/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
	pagerange_t *pr, *p;
	addr_iter iter;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages, largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();

	wantnodes = firstnodes;
	wantpages = firstpages + npages;
#endif

	assert(!addr_search(&addravl, pageno, AVL_EQUAL));

	/* Try to merge with the higher neighbour. */
	if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
		USE(pr, pr->addr -= npages;
			pr->size += npages;);
	} else {
		if(!SLABALLOC(pr))
			panic("free_pages: can't alloc");
#if SANITYCHECKS
		memstats(&firstnodes, &firstpages, &largest);

		wantnodes = firstnodes;
		wantpages = firstpages + npages;

		sanitycheck();
#endif
		assert(npages > 0);
		USE(pr, pr->addr = pageno;
			pr->size = npages;);
		addr_insert(&addravl, pr);
#if SANITYCHECKS
		wantnodes++;
#endif
	}

	addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
	p = addr_get_iter(&iter);
	assert(p);
	assert(p == pr);

	/* Try to merge with the lower neighbour. */
	addr_decr_iter(&iter);
	if((p = addr_get_iter(&iter))) {
		SLABSANE(p);
		if(p->addr + p->size == pr->addr) {
			USE(p, p->size += pr->size;);
			addr_remove(&addravl, pr->addr);
			SLABFREE(pr);
#if SANITYCHECKS
			wantnodes--;
#endif
		}
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);
#endif
}
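/* A merging example (illustrative page numbers): with free ranges
 * { addr 100, size 4 } and { addr 110, size 2 } in the AVL tree,
 * free_pages(104, 6) first finds the higher neighbour at 110 and grows it
 * downward to { addr 104, size 8 }; the lower-neighbour pass then sees that
 * 100 + 4 == 104 and folds everything into { addr 100, size 12 }.
 */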
#define NR_DMA	16

PRIVATE struct dmatab
{
	int dt_flags;
	endpoint_t dt_proc;
	phys_bytes dt_base;
	phys_bytes dt_size;
	phys_clicks dt_seg_base;
	phys_clicks dt_seg_size;
} dmatab[NR_DMA];

#define DTF_INUSE	1
#define DTF_RELEASE_DMA	2
#define DTF_RELEASE_SEG	4
/*===========================================================================*
 *				do_adddma				     *
 *===========================================================================*/
PUBLIC int do_adddma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	req_proc_e= msg->VMAD_REQ;
	target_proc_e= msg->VMAD_EP;
	base= msg->VMAD_START;
	size= msg->VMAD_SIZE;

	/* Find empty slot. */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			break;
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_adddma: dma table full\n");
		for (i= 0; i<NR_DMA; i++)
		{
			printf("%d: flags 0x%x proc %d base 0x%x size 0x%x\n",
				i, dmatab[i].dt_flags,
				dmatab[i].dt_proc,
				dmatab[i].dt_base,
				dmatab[i].dt_size);
		}
		panic("adddma: table full");
		return ENOSPC;
	}

	/* Find target process. */
	if (vm_isokendpt(target_proc_e, &proc_n) != OK)
	{
		printf("vm:do_adddma: endpoint %d not found\n", target_proc_e);
		return EINVAL;
	}
	vmp= &vmproc[proc_n];
	vmp->vm_flags |= VMF_HAS_DMA;

	dmatab[i].dt_flags= DTF_INUSE;
	dmatab[i].dt_proc= target_proc_e;
	dmatab[i].dt_base= base;
	dmatab[i].dt_size= size;

	return OK;
}
/*===========================================================================*
 *				do_deldma				     *
 *===========================================================================*/
PUBLIC int do_deldma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, j;
	phys_bytes base, size;

	req_proc_e= msg->VMDD_REQ;
	target_proc_e= msg->VMDD_EP;
	base= msg->VMDD_START;
	size= msg->VMDD_SIZE;

	/* Find slot. */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc == target_proc_e &&
			dmatab[i].dt_base == base &&
			dmatab[i].dt_size == size)
		{
			break;
		}
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_deldma: slot not found\n");
		return ESRCH;
	}

	if (dmatab[i].dt_flags & DTF_RELEASE_SEG)
	{
		/* Check if we have to release the segment. */
		for (j= 0; j<NR_DMA; j++)
		{
			if (j == i)
				continue;
			if (!(dmatab[j].dt_flags & DTF_INUSE))
				continue;
			if (!(dmatab[j].dt_flags & DTF_RELEASE_SEG))
				continue;
			if (dmatab[j].dt_proc == target_proc_e)
				break;
		}
		if (j >= NR_DMA)
		{
			/* Last segment. */
			free_mem(dmatab[i].dt_seg_base,
				dmatab[i].dt_seg_size);
		}
	}

	dmatab[i].dt_flags &= ~DTF_INUSE;

	return OK;
}
/*===========================================================================*
 *				do_getdma				     *
 *===========================================================================*/
PUBLIC int do_getdma(message *msg)
{
	int i;

	/* Find slot to report. */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (!(dmatab[i].dt_flags & DTF_RELEASE_DMA))
			continue;

		printf("do_getdma: setting reply to 0x%x@0x%x proc %d\n",
			dmatab[i].dt_size, dmatab[i].dt_base,
			dmatab[i].dt_proc);
		msg->VMGD_PROCP= dmatab[i].dt_proc;
		msg->VMGD_BASEP= dmatab[i].dt_base;
		msg->VMGD_SIZEP= dmatab[i].dt_size;

		return OK;
	}

	/* Nothing. */
	return EAGAIN;
}
/*===========================================================================*
 *				release_dma				     *
 *===========================================================================*/
PUBLIC void release_dma(struct vmproc *vmp)
{
	panic("release_dma not done");
#if 0
	int i, found_one;

	found_one= FALSE;
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc != vmp->vm_endpoint)
			continue;
		dmatab[i].dt_flags |= DTF_RELEASE_DMA | DTF_RELEASE_SEG;
		dmatab[i].dt_seg_base= base;
		dmatab[i].dt_seg_size= size;
		found_one= TRUE;
	}

	if (!found_one)
		free_mem(base, size);

	msg->VMRD_FOUND = found_one;
#endif

	return;
}
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;
	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%ukB) free, largest %d pages (%ukB)\n",
		nodes, pages, (u32_t) pages * (VM_PAGE_SIZE/1024),
		largest, (u32_t) largest * (VM_PAGE_SIZE/1024));
}
#if SANITYCHECKS

/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}

/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	if(!incheck)
		return OK;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));
	assert(len > 0);
	assert_range(addr, len);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < MAXPAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(GET_BIT(pagemap, pagestart)) {
			printf("%s:%d: usedpages_add: addr 0x%lx reused.\n",
				file, line, thisaddr);
			return EFAULT;
		}
		SET_BIT(pagemap, pagestart);
		pages--;
		pagestart++;
	}

	return OK;
}

#endif
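/* The pagemap check above is a classic double-allocation detector: every page
 * claimed during one sanity-check pass sets its bit, so a second claim on the
 * same physical page trips GET_BIT and reports the file and line of the
 * offender. With VM_PAGE_SIZE at 4kB, a call such as
 * usedpages_add_f(0x1000, 0x2000, ...) would mark bits 1 and 2 of pagemap.
 */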
/*===========================================================================*
 *				alloc_mem_in_list			     *
 *===========================================================================*/
struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
{
	phys_bytes rempages;
	struct memlist *head = NULL, *ml;

	assert(bytes > 0);
	assert(!(bytes % VM_PAGE_SIZE));

	rempages = bytes / VM_PAGE_SIZE;

	/* Unless we are told to allocate all memory
	 * contiguously, tell the alloc function to grab whatever
	 * block it can find.
	 */
	if(!(flags & PAF_CONTIG))
		flags |= PAF_FIRSTBLOCK;

	do {
		struct memlist *ml;
		phys_bytes mem, gotpages;
		vir_bytes freed = 0;

		do {
			mem = alloc_pages(rempages, flags, &gotpages);

			if(mem == NO_MEM) {
				printf("*");
				freed = free_yielded(rempages * VM_PAGE_SIZE);
			}
		} while(mem == NO_MEM && freed > 0);

		if(mem == NO_MEM) {
			printf("alloc_mem_in_list: giving up, %dkB missing\n",
				rempages * VM_PAGE_SIZE/1024);
			printmemstats();
			free_mem_list(head, 1);
			return NULL;
		}

		assert(gotpages <= rempages);
		assert(gotpages > 0);

		if(!(SLABALLOC(ml))) {
			free_mem_list(head, 1);
			free_pages(mem, gotpages);
			return NULL;
		}

		USE(ml,
			ml->phys = CLICK2ABS(mem);
			ml->length = CLICK2ABS(gotpages);
			ml->next = head;);
		head = ml;
		rempages -= gotpages;
	} while(rempages > 0);

	for(ml = head; ml; ml = ml->next) {
		assert(ml->phys);
		assert(ml->length);
	}

	return head;
}
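/* A usage sketch (illustrative caller, not from this file; map_chunk is a
 * hypothetical helper):
 *
 *	struct memlist *ml, *list;
 *	if(!(list = alloc_mem_in_list(16 * VM_PAGE_SIZE, PAF_CLEAR)))
 *		panic("out of memory");
 *	for(ml = list; ml; ml = ml->next)
 *		map_chunk(ml->phys, ml->length);
 *	free_mem_list(list, 0);		(0: free the list nodes, keep the memory)
 */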
/*===========================================================================*
 *				free_mem_list				     *
 *===========================================================================*/
void free_mem_list(struct memlist *list, int all)
{
	while(list) {
		struct memlist *next;
		next = list->next;
		assert(!(list->phys % VM_PAGE_SIZE));
		assert(!(list->length % VM_PAGE_SIZE));
		if(all)
			free_pages(list->phys / VM_PAGE_SIZE,
				list->length / VM_PAGE_SIZE);
		SLABFREE(list);
		list = next;
	}
}
/*===========================================================================*
 *				print_mem_list				     *
 *===========================================================================*/
void print_mem_list(struct memlist *list)
{
	while(list) {
		assert(list->length > 0);
		printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
		printf(" ");
		list = list->next;
	}
	printf("\n");
}