vm: use assert() instead of vm_assert(); remove vm_assert().
[minix.git] / servers / vm / alloc.c
blob 4e1cb65deb22ff5f9244f07e5412782bfcaef153

/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls. The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address. The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM). During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:	allocate a given sized chunk of memory
 *   free_mem:	release a previously allocated chunk of memory
 *   mem_init:	initialize the tables when PM starts up
 */
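
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * callers on the FORK/EXEC path in effect do the equivalent of the
 * following, with 'clicks' a hypothetical size.
 *
 *	phys_clicks base = alloc_mem(clicks, PAF_CLEAR);
 *	if (base == NO_MEM)
 *		return ENOMEM;
 *	...
 *	free_mem(base, clicks);
 */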

#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <memory.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "pagerange.h"
#include "addravl.h"
#include "sanitycheck.h"
#include "memlist.h"

/* AVL tree of free pages. */
addr_avl addravl;

/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
#define assert_range(addr, len) do { \
	assert((addr) >= mem_low); \
	assert((addr) + (len) - 1 <= mem_high); \
} while(0)

struct hole {
	struct hole *h_next;	/* pointer to next entry on the list */
	phys_clicks h_base;	/* where does the hole begin? */
	phys_clicks h_len;	/* how big is the hole? */
	int freelist;
	int holelist;
};

static int startpages;

#define NIL_HOLE (struct hole *) 0

#define _NR_HOLES (_NR_PROCS*2)	/* No. of memory holes maintained by VM */

PRIVATE struct hole hole[_NR_HOLES];

PRIVATE struct hole *hole_head;	/* pointer to first hole */
PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */

FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags,
	phys_bytes *ret));

#if SANITYCHECKS
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)

#define PAGESPERGB (1024*1024*1024/VM_PAGE_SIZE)	/* 1GB of memory */
#define MAXPAGES (2*PAGESPERGB)
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];
#else
#define CHECKHOLES
#endif

#if SANITYCHECKS

/*===========================================================================*
 *				holes_sanity_f				     *
 *===========================================================================*/
PRIVATE void holes_sanity_f(file, line)
char *file;
int line;
{
#define myassert(c) { \
	if(!(c)) { \
		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
		util_stacktrace(); \
		panic("assert failed"); } \
	}

	int h, c = 0, n = 0;
	struct hole *hp;

	/* Reset flags. */
	for(h = 0; h < _NR_HOLES; h++) {
		hole[h].freelist = 0;
		hole[h].holelist = 0;
	}

	/* Mark all holes on the free list. */
	for(hp = free_slots; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->freelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Mark all holes on the hole list. */
	c = 0;
	for(hp = hole_head; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->holelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Check that there are exactly the right number of nodes. */
	myassert(n == _NR_HOLES);

	/* Make sure each slot is on exactly one of the lists. */
	c = 0;
	for(h = 0; h < _NR_HOLES; h++) {
		hp = &hole[h];
		myassert(hp->holelist || hp->freelist);
		myassert(!(hp->holelist && hp->freelist));
		myassert(c < _NR_HOLES);
		c++;
	}

	/* Make sure the holes are sorted, disjoint and fully coalesced. */
	for(hp = hole_head; hp && hp->h_next; hp = hp->h_next) {
		myassert(hp->holelist);

		/* No holes overlap. */
		myassert(hp->h_base + hp->h_len <= hp->h_next->h_base);

		/* No uncoalesced holes. */
		myassert(hp->h_base + hp->h_len < hp->h_next->h_base);
	}
}
#endif

/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'. A pointer to the block is returned. The block is
 * always on a click boundary. This procedure is called when memory is
 * needed for FORK or EXEC.
 */
	register struct hole *hp, *prev_ptr;
	phys_clicks old_base, mem = NO_MEM, align_clicks = 0;
	int s;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}

	if(vm_paged) {
		assert(CLICK_SIZE == VM_PAGE_SIZE);
		mem = alloc_pages(clicks, memflags, NULL);
	} else {
		CHECKHOLES;
		prev_ptr = NIL_HOLE;
		hp = hole_head;
		while (hp != NIL_HOLE) {
			if (hp->h_len >= clicks) {
				/* We found a hole that is big enough. Use it. */
				old_base = hp->h_base;	/* remember where it started */
				hp->h_base += clicks;	/* bite a piece off */
				hp->h_len -= clicks;	/* ditto */

				/* Delete the hole if used up completely. */
				if (hp->h_len == 0) del_slot(prev_ptr, hp);

				/* Does anything special need to happen? */
				if(memflags & PAF_CLEAR) {
					if ((s= sys_memset(0, CLICK_SIZE*old_base,
						CLICK_SIZE*clicks)) != OK) {
						panic("alloc_mem: sys_memset failed: %d", s);
					}
				}

				/* Return the start address of the acquired block. */
				CHECKHOLES;
				mem = old_base;
				break;
			}

			prev_ptr = hp;
			hp = hp->h_next;
		}
	}

	if(mem == NO_MEM)
		return mem;

	CHECKHOLES;

	if(align_clicks) {
		phys_clicks o;
		o = mem % align_clicks;
		if(o > 0) {
			phys_clicks e;
			e = align_clicks - o;
			free_mem(mem, e);
			mem += e;
		}
	}
	CHECKHOLES;

	return mem;
}
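
/*
 * Worked example of the PAF_ALIGN64K logic above (illustrative numbers,
 * not from the original source): with CLICK_SIZE == 4096, align_clicks
 * is 16, so a request for 10 clicks is inflated to 26. If the search
 * returns mem == 21, then o == 21 % 16 == 5 and e == 11; the 11 leading
 * clicks are freed again and mem becomes 32, which is 64kB-aligned and
 * still leaves at least the 10 clicks asked for.
 */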

/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
PUBLIC void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
 * the block starts in physical memory and how big it is. The block is added
 * to the hole list. If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
	register struct hole *hp, *new_ptr, *prev_ptr;
	CHECKHOLES;

	if (clicks == 0) return;

	if(vm_paged) {
		assert(CLICK_SIZE == VM_PAGE_SIZE);
		free_pages(base, clicks);
		return;
	}

	if ( (new_ptr = free_slots) == NIL_HOLE)
		panic("hole table full");
	new_ptr->h_base = base;
	new_ptr->h_len = clicks;
	free_slots = new_ptr->h_next;
	hp = hole_head;

	/* If this block's address is numerically less than the lowest hole
	 * currently available, or if no holes are currently available, put
	 * this hole on the front of the hole list.
	 */
	if (hp == NIL_HOLE || base <= hp->h_base) {
		/* Block to be freed goes on front of the hole list. */
		new_ptr->h_next = hp;
		hole_head = new_ptr;
		merge(new_ptr);
		CHECKHOLES;
		return;
	}

	/* Block to be returned does not go on front of hole list. */
	prev_ptr = NIL_HOLE;
	while (hp != NIL_HOLE && base > hp->h_base) {
		prev_ptr = hp;
		hp = hp->h_next;
	}

	/* We found where it goes. Insert block after 'prev_ptr'. */
	new_ptr->h_next = prev_ptr->h_next;
	prev_ptr->h_next = new_ptr;
	merge(prev_ptr);	/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
	CHECKHOLES;
}

/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
register struct hole *prev_ptr;	/* pointer to hole entry just ahead of 'hp' */
register struct hole *hp;	/* pointer to hole entry to be removed */
{
/* Remove an entry from the hole list. This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
	if (hp == hole_head)
		hole_head = hp->h_next;
	else
		prev_ptr->h_next = hp->h_next;

	hp->h_next = free_slots;
	hp->h_base = hp->h_len = 0;
	free_slots = hp;
}

/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found. Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends. The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
	register struct hole *next_ptr;

	/* If 'hp' points to the last hole, no merging is possible. If it does
	 * not, try to absorb its successor into it and free the successor's
	 * table entry.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
		del_slot(hp, next_ptr);
	} else {
		hp = next_ptr;
	}

	/* If 'hp' now points to the last hole, return; otherwise, try to
	 * absorb its successor into it.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;
		del_slot(hp, next_ptr);
	}
}
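
/*
 * Worked example (illustrative addresses, not from the original source):
 * if the hole list holds holes at clicks [100..119] and [140..149], and
 * free_mem() inserts [120..139] between them, merge() first absorbs the
 * new hole into its predecessor, giving [100..139], and then absorbs
 * [140..149] as well, leaving a single hole [100..149] and returning
 * two table slots to 'free_slots'.
 */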

/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks)
struct memory *chunks;		/* list of free memory chunks */
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use. Initially, the former
 * list has one entry for each chunk of physical memory, and the latter
 * list links together the remaining table slots. As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them. These slots
 * are taken from the list headed by 'free_slots'.
 */
	int i, first = 1;
	register struct hole *hp;

	/* Put all holes on the free list. */
	for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
		hp->h_next = hp + 1;
		hp->h_base = hp->h_len = 0;
	}
	hole[_NR_HOLES-1].h_next = NIL_HOLE;
	hole_head = NIL_HOLE;
	free_slots = &hole[0];

	addr_init(&addravl);

	total_pages = 0;

	/* Use the chunks of physical memory to allocate holes. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			free_mem(chunks[i].base, chunks[i].size);
			total_pages += chunks[i].size;
			first = 0;
		}
	}

	CHECKHOLES;
}
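
/*
 * Illustrative sketch (hypothetical values, not from the original
 * source): at boot, VM hands mem_init() the chunk list it got from the
 * boot monitor, e.g.
 *
 *	struct memory chunks[NR_MEMS];
 *	chunks[0].base = 0x800;		... start, in clicks
 *	chunks[0].size = 0x400;		... length, in clicks
 *	...
 *	mem_init(chunks);
 *
 * after which every nonempty chunk has become one hole (or, with
 * vm_paged set, one free range in 'addravl'), and [mem_low, mem_high]
 * brackets all usable physical memory for assert_range().
 */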

#if SANITYCHECKS
PRIVATE void sanitycheck(void)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		assert(p->size > 0);
		if(prevp) {
			/* Ranges must be sorted and must not abut, or they
			 * would have been merged.
			 */
			assert(prevp->addr < p->addr);
			assert(prevp->addr + prevp->size < p->addr);
		}
		prevp = p;
		addr_incr_iter(&iter);
	}
}
#endif

PUBLIC void memstats(int *nodes, int *pages, int *largest)
{
	pagerange_t *p;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	*nodes = 0;
	*pages = 0;
	*largest = 0;
#if SANITYCHECKS
	sanitycheck();
#endif
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		(*nodes)++;
		(*pages) += p->size;
		if(p->size > *largest)
			*largest = p->size;
		addr_incr_iter(&iter);
	}
}

/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
PRIVATE phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
{
	addr_iter iter;
	pagerange_t *pr;
	int incr;
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1  =  1 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes mem;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages;
	int largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();
	wantnodes = firstnodes;
	wantpages = firstpages - pages;
#endif

	if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB)) {
		/* Constrained allocations are searched from low addresses up. */
		addr_start_iter_least(&addravl, &iter);
		incr = 1;
	} else {
		addr_start_iter_greatest(&addravl, &iter);
		incr = 0;
	}

	while((pr = addr_get_iter(&iter))) {
		SLABSANE(pr);
		assert(pr->size > 0);
		if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
			if(memflags & PAF_LOWER16MB) {
				if(pr->addr + pages > boundary16)
					return NO_MEM;
			}

			if(memflags & PAF_LOWER1MB) {
				if(pr->addr + pages > boundary1)
					return NO_MEM;
			}

			/* good block found! */
			break;
		}
		if(incr)
			addr_incr_iter(&iter);
		else
			addr_decr_iter(&iter);
	}

	if(!pr) {
		printf("VM: alloc_pages: alloc failed of %d pages\n", pages);
		util_stacktrace();
		printmemstats();
		if(len)
			*len = 0;
#if SANITYCHECKS
		if(largest >= pages) {
			panic("no memory but largest was enough");
		}
#endif
		return NO_MEM;
	}

	SLABSANE(pr);

	if(memflags & PAF_FIRSTBLOCK) {
		assert(len);
		/* The block doesn't have to be as big as requested;
		 * return its actual size though.
		 */
		if(pr->size < pages) {
			pages = pr->size;
#if SANITYCHECKS
			wantpages = firstpages - pages;
#endif
		}
	}

	if(len)
		*len = pages;

	/* The allocated chunk is taken off the end of the range. */
	mem = pr->addr + pr->size - pages;

	assert(pr->size >= pages);
	if(pr->size == pages) {
		pagerange_t *prr;
		prr = addr_remove(&addravl, pr->addr);
		assert(prr);
		assert(prr == pr);
		SLABFREE(pr);
#if SANITYCHECKS
		wantnodes--;
#endif
	} else {
		USE(pr, pr->size -= pages;);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s= sys_memset(0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_pages: sys_memset failed: %d", s);
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	if(finalpages != wantpages) {
		printf("pages start: %d req: %d final: %d\n",
			firstpages, pages, finalpages);
	}
	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);
#endif

	return mem;
}
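
/*
 * Worked example (illustrative numbers, not from the original source):
 * if the tree holds a free range {addr = 1000, size = 50} and
 * alloc_pages(8, 0, NULL) selects it, the chunk comes off the end:
 * mem = 1000 + 50 - 8 = 1042, and the range shrinks in place to
 * {addr = 1000, size = 42}. Only when the range matches the request
 * exactly is its pagerange node removed from the tree and freed.
 */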

/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
	pagerange_t *pr, *p;
	addr_iter iter;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages, largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();

	wantnodes = firstnodes;
	wantpages = firstpages + npages;
#endif

	assert(!addr_search(&addravl, pageno, AVL_EQUAL));

	/* Try to merge with the higher neighbour. */
	if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
		USE(pr, pr->addr -= npages;
			pr->size += npages;);
	} else {
		if(!SLABALLOC(pr))
			panic("free_pages: can't alloc");
#if SANITYCHECKS
		memstats(&firstnodes, &firstpages, &largest);

		wantnodes = firstnodes;
		wantpages = firstpages + npages;

		sanitycheck();
#endif
		assert(npages > 0);
		USE(pr, pr->addr = pageno;
			pr->size = npages;);
		addr_insert(&addravl, pr);
#if SANITYCHECKS
		wantnodes++;
#endif
	}

	addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
	p = addr_get_iter(&iter);
	assert(p);
	assert(p == pr);

	/* Try to merge with the lower neighbour as well. */
	addr_decr_iter(&iter);
	if((p = addr_get_iter(&iter))) {
		SLABSANE(p);
		if(p->addr + p->size == pr->addr) {
			USE(p, p->size += pr->size;);
			addr_remove(&addravl, pr->addr);
			SLABFREE(pr);
#if SANITYCHECKS
			wantnodes--;
#endif
		}
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	assert(finalnodes == wantnodes);
	assert(finalpages == wantpages);
#endif
}
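
/*
 * Worked example (illustrative numbers, not from the original source):
 * calling free_pages(90, 10) with the tree holding {addr = 50, size = 40}
 * and {addr = 100, size = 20} first merges with the higher neighbour,
 * turning it into {addr = 90, size = 30}; the lower-neighbour check then
 * sees that 50 + 40 == 90 and folds everything into a single range
 * {addr = 50, size = 70}.
 */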

#define NR_DMA	16

PRIVATE struct dmatab
{
	int dt_flags;
	endpoint_t dt_proc;
	phys_bytes dt_base;
	phys_bytes dt_size;
	phys_clicks dt_seg_base;
	phys_clicks dt_seg_size;
} dmatab[NR_DMA];

#define DTF_INUSE	1
#define DTF_RELEASE_DMA	2
#define DTF_RELEASE_SEG	4

/*===========================================================================*
 *				do_adddma				     *
 *===========================================================================*/
PUBLIC int do_adddma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	req_proc_e= msg->VMAD_REQ;
	target_proc_e= msg->VMAD_EP;
	base= msg->VMAD_START;
	size= msg->VMAD_SIZE;

	/* Find empty slot */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			break;
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_adddma: dma table full\n");
		for (i= 0; i<NR_DMA; i++)
		{
			printf("%d: flags 0x%x proc %d base 0x%x size 0x%x\n",
				i, dmatab[i].dt_flags,
				dmatab[i].dt_proc,
				dmatab[i].dt_base,
				dmatab[i].dt_size);
		}
		panic("adddma: table full");
		return ENOSPC;
	}

	/* Find target process */
	if (vm_isokendpt(target_proc_e, &proc_n) != OK)
	{
		printf("vm:do_adddma: endpoint %d not found\n", target_proc_e);
		return EINVAL;
	}
	vmp= &vmproc[proc_n];
	vmp->vm_flags |= VMF_HAS_DMA;

	dmatab[i].dt_flags= DTF_INUSE;
	dmatab[i].dt_proc= target_proc_e;
	dmatab[i].dt_base= base;
	dmatab[i].dt_size= size;

	return OK;
}
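
/* A minimal sketch (illustrative, not part of the original file) of the
 * request that reaches do_adddma() from VM's dispatch loop; the endpoint
 * and buffer values are hypothetical.
 */
#if 0
static int example_adddma(void)
{
	message m;

	m.VMAD_REQ = 100;		/* hypothetical requesting driver */
	m.VMAD_EP = 101;		/* hypothetical target endpoint */
	m.VMAD_START = 0x100000;	/* base of the DMA buffer */
	m.VMAD_SIZE = 0x10000;		/* its length in bytes */
	return do_adddma(&m);		/* OK, EINVAL, or panic when full */
}
#endif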

/*===========================================================================*
 *				do_deldma				     *
 *===========================================================================*/
PUBLIC int do_deldma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, j;
	phys_bytes base, size;

	req_proc_e= msg->VMDD_REQ;
	target_proc_e= msg->VMDD_EP;
	base= msg->VMDD_START;
	size= msg->VMDD_SIZE;

	/* Find slot */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc == target_proc_e &&
			dmatab[i].dt_base == base &&
			dmatab[i].dt_size == size)
		{
			break;
		}
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_deldma: slot not found\n");
		return ESRCH;
	}

	if (dmatab[i].dt_flags & DTF_RELEASE_SEG)
	{
		/* Check if we have to release the segment */
		for (j= 0; j<NR_DMA; j++)
		{
			if (j == i)
				continue;
			if (!(dmatab[j].dt_flags & DTF_INUSE))
				continue;
			if (!(dmatab[j].dt_flags & DTF_RELEASE_SEG))
				continue;
			if (dmatab[j].dt_proc == target_proc_e)
				break;
		}
		if (j >= NR_DMA)
		{
			/* Last segment */
			free_mem(dmatab[i].dt_seg_base,
				dmatab[i].dt_seg_size);
		}
	}

	dmatab[i].dt_flags &= ~DTF_INUSE;

	return OK;
}

/*===========================================================================*
 *				do_getdma				     *
 *===========================================================================*/
PUBLIC int do_getdma(message *msg)
{
	int i;

	/* Find a slot to report */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (!(dmatab[i].dt_flags & DTF_RELEASE_DMA))
			continue;

		printf("do_getdma: setting reply to 0x%x@0x%x proc %d\n",
			dmatab[i].dt_size, dmatab[i].dt_base,
			dmatab[i].dt_proc);
		msg->VMGD_PROCP= dmatab[i].dt_proc;
		msg->VMGD_BASEP= dmatab[i].dt_base;
		msg->VMGD_SIZEP= dmatab[i].dt_size;

		return OK;
	}

	/* Nothing */
	return EAGAIN;
}

/*===========================================================================*
 *				release_dma				     *
 *===========================================================================*/
PUBLIC void release_dma(struct vmproc *vmp)
{
	panic("release_dma not done");
#if 0
	int i, found_one;

	found_one= FALSE;
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc != vmp->vm_endpoint)
			continue;
		dmatab[i].dt_flags |= DTF_RELEASE_DMA | DTF_RELEASE_SEG;
		dmatab[i].dt_seg_base= base;
		dmatab[i].dt_seg_size= size;
		found_one= TRUE;
	}

	if (!found_one)
		free_mem(base, size);

	msg->VMRD_FOUND = found_one;
#endif

	return;
}

/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;

	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%ukB) free, largest %d pages (%ukB)\n",
		nodes, pages, (u32_t) pages * (VM_PAGE_SIZE/1024),
		largest, (u32_t) largest * (VM_PAGE_SIZE/1024));
}

#if SANITYCHECKS

/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}

/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	if(!incheck)
		return OK;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));
	assert(len > 0);
	assert_range(addr, len);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < MAXPAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(GET_BIT(pagemap, pagestart)) {
			printf("%s:%d: usedpages_add: addr 0x%lx reused.\n",
				file, line, thisaddr);
			return EFAULT;
		}
		SET_BIT(pagemap, pagestart);
		pages--;
		pagestart++;
	}

	return OK;
}

#endif

/*===========================================================================*
 *				alloc_mem_in_list			     *
 *===========================================================================*/
struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
{
	phys_bytes rempages;
	struct memlist *head = NULL, *ml;

	assert(bytes > 0);
	assert(!(bytes % VM_PAGE_SIZE));

	rempages = bytes / VM_PAGE_SIZE;

	/* Unless we are told to allocate all memory contiguously, tell the
	 * alloc function to grab whatever block it can find.
	 */
	if(!(flags & PAF_CONTIG))
		flags |= PAF_FIRSTBLOCK;

	do {
		struct memlist *chunk;
		phys_bytes mem, gotpages;
		mem = alloc_pages(rempages, flags, &gotpages);

		if(mem == NO_MEM) {
			free_mem_list(head, 1);
			return NULL;
		}

		assert(gotpages <= rempages);
		assert(gotpages > 0);

		if(!(SLABALLOC(chunk))) {
			free_mem_list(head, 1);
			free_pages(mem, gotpages);
			return NULL;
		}

		USE(chunk,
			chunk->phys = CLICK2ABS(mem);
			chunk->length = CLICK2ABS(gotpages);
			chunk->next = head;);
		head = chunk;
		rempages -= gotpages;
	} while(rempages > 0);

	for(ml = head; ml; ml = ml->next) {
		assert(ml->phys);
		assert(ml->length);
	}

	return head;
}
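
/* A minimal usage sketch (illustrative, not part of the original file):
 * allocate a possibly fragmented 16-page region, walk the pieces, and
 * free them again. The size is hypothetical.
 */
#if 0
static void example_mem_in_list(void)
{
	struct memlist *ml, *head;

	head = alloc_mem_in_list(16 * VM_PAGE_SIZE, 0);
	if(!head)
		panic("example: out of memory");
	for(ml = head; ml; ml = ml->next)
		printf("piece at 0x%lx, length 0x%lx\n",
			ml->phys, ml->length);
	free_mem_list(head, 1);	/* also returns the pages */
}
#endif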

/*===========================================================================*
 *				free_mem_list				     *
 *===========================================================================*/
void free_mem_list(struct memlist *list, int all)
{
	while(list) {
		struct memlist *next;
		next = list->next;
		assert(!(list->phys % VM_PAGE_SIZE));
		assert(!(list->length % VM_PAGE_SIZE));
		if(all)
			free_pages(list->phys / VM_PAGE_SIZE,
				list->length / VM_PAGE_SIZE);
		SLABFREE(list);
		list = next;
	}
}

/*===========================================================================*
 *				print_mem_list				     *
 *===========================================================================*/
void print_mem_list(struct memlist *list)
{
	while(list) {
		assert(list->length > 0);
		printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
		printf(" ");
		list = list->next;
	}
	printf("\n");
}