[minix.git] / servers / vm / alloc.c
/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls. The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address. The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM). During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem:	allocate a given sized chunk of memory
 *   free_mem:	release a previously allocated chunk of memory
 *   mem_init:	initialize the tables when PM starts up
 */
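/* Illustrative sketch (not part of the original source): a caller such as
 * the FORK handler would use these entry points roughly as follows, assuming
 * ALLOC_MEM is the usual wrapper macro around alloc_mem_f, analogous to the
 * FREE_MEM wrapper around free_mem_f that appears later in this file:
 *
 *	phys_clicks base = ALLOC_MEM(clicks, PAF_CLEAR);
 *	if (base == NO_MEM)
 *		return ENOMEM;
 *	...
 *	FREE_MEM(base, clicks);
 */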
#define _SYSTEM 1

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include <sys/mman.h>

#include <limits.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <memory.h>

#include "vm.h"
#include "proto.h"
#include "util.h"
#include "glo.h"
#include "pagerange.h"
#include "addravl.h"
#include "sanitycheck.h"
/* AVL tree of free pages. */
addr_avl addravl;
/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;
#define vm_assert_range(addr, len)  \
	vm_assert((addr) >= mem_low); \
	vm_assert((addr) + (len) - 1 <= mem_high);
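/* Note that vm_assert_range expands to two statements, not one; it is only
 * safe where a multi-statement expansion is (as in the braced macros below),
 * and would misbehave as the body of an unbraced if or else.
 */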
struct hole {
	struct hole *h_next;	/* pointer to next entry on the list */
	phys_clicks h_base;	/* where does the hole begin? */
	phys_clicks h_len;	/* how big is the hole? */
	int freelist;		/* marked by the sanity checker: on free_slots */
	int holelist;		/* marked by the sanity checker: on hole_head */
};
static int startpages;

#define NIL_HOLE (struct hole *) 0

#define _NR_HOLES (_NR_PROCS*2)  /* No. of memory holes maintained by VM */

PRIVATE struct hole hole[_NR_HOLES];

PRIVATE struct hole *hole_head;	/* pointer to first hole */
PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */
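/* Invariant (verified by holes_sanity_f below): every one of the _NR_HOLES
 * slots is, at all times, on exactly one of these two lists: either a live
 * hole reachable from 'hole_head' or an unused slot on 'free_slots'.
 */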
FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags) );
#if SANITYCHECKS
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)

#define MAXPAGES (1024*1024*1024/VM_PAGE_SIZE)	/* 1GB of memory */
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];

#else
#define CHECKHOLES
#endif
/* Sanity check for parameters of node p. */
#define vm_assert_params(p, bytes, next) { \
	vm_assert((p) != NO_MEM); \
	vm_assert(!((bytes) % VM_PAGE_SIZE)); \
	vm_assert(!((next) % VM_PAGE_SIZE)); \
	vm_assert((bytes) > 0); \
	vm_assert((p) + (bytes) > (p)); \
	vm_assert((next) == NO_MEM || ((p) + (bytes) <= (next))); \
	vm_assert_range((p), (bytes)); \
	vm_assert_range((next), 1); \
}
/* Retrieve size of free block and pointer to next block from physical
 * address (page) p.
 */
#define GET_PARAMS(p, bytes, next) { \
	phys_readaddr((p), &(bytes), &(next)); \
	vm_assert_params((p), (bytes), (next)); \
}

/* Write parameters to physical page p. */
#define SET_PARAMS(p, bytes, next) { \
	vm_assert_params((p), (bytes), (next)); \
	phys_writeaddr((p), (bytes), (next)); \
}
#if SANITYCHECKS

/*===========================================================================*
 *				holes_sanity_f				     *
 *===========================================================================*/
PRIVATE void holes_sanity_f(file, line)
char *file;
int line;
{
#define myassert(c) { \
	if(!(c)) { \
		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
		util_stacktrace(); \
		vm_panic("assert failed.", NO_NUM); } \
	}

	int h, c = 0, n = 0;
	struct hole *hp;

	/* Reset flags. */
	for(h = 0; h < _NR_HOLES; h++) {
		hole[h].freelist = 0;
		hole[h].holelist = 0;
	}

	/* Mark all holes on freelist. */
	for(hp = free_slots; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->freelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Mark all holes on holelist. */
	c = 0;
	for(hp = hole_head; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->holelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}

	/* Check there are exactly the right number of nodes. */
	myassert(n == _NR_HOLES);

	/* Make sure each slot is on exactly one of the lists. */
	c = 0;
	for(h = 0; h < _NR_HOLES; h++) {
		hp = &hole[h];
		myassert(hp->holelist || hp->freelist);
		myassert(!(hp->holelist && hp->freelist));
		myassert(c < _NR_HOLES);
		c++;
	}

	/* Make sure no holes overlap. */
	for(hp = hole_head; hp && hp->h_next; hp = hp->h_next) {
		myassert(hp->holelist);
		/* No holes overlap. */
		myassert(hp->h_base + hp->h_len <= hp->h_next->h_base);

		/* No uncoalesced holes. */
		myassert(hp->h_base + hp->h_len < hp->h_next->h_base);
	}
}
#endif
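/* Design note: holes_sanity_f uses a two-pass marking scheme. First every
 * slot reachable from each list is flagged, then a sweep over the whole
 * hole[] array verifies that the two flags partition all _NR_HOLES slots,
 * which catches cycles, leaked slots and doubly-linked slots alike.
 */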
/*===========================================================================*
 *				alloc_mem_f				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem_f(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'. A pointer to the block is returned. The block is
 * always on a click boundary. This procedure is called when memory is
 * needed for FORK or EXEC.
 */
	register struct hole *hp, *prev_ptr;
	phys_clicks old_base, mem = NO_MEM, align_clicks = 0;
	int s;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}

	if(vm_paged) {
		vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
		mem = alloc_pages(clicks, memflags);
	} else {
		CHECKHOLES;
		prev_ptr = NIL_HOLE;
		hp = hole_head;
		while (hp != NIL_HOLE) {
			if (hp->h_len >= clicks) {
				/* We found a hole that is big enough. Use it. */
				old_base = hp->h_base;	/* remember where it started */
				hp->h_base += clicks;	/* bite a piece off */
				hp->h_len -= clicks;	/* ditto */

				/* Delete the hole if used up completely. */
				if (hp->h_len == 0) del_slot(prev_ptr, hp);

				/* Anything special needs to happen? */
				if(memflags & PAF_CLEAR) {
					if ((s= sys_memset(0, CLICK_SIZE*old_base,
						CLICK_SIZE*clicks)) != OK) {
						vm_panic("alloc_mem: sys_memset failed", s);
					}
				}

				/* Return the start address of the acquired block. */
				CHECKHOLES;
				mem = old_base;
				break;
			}

			prev_ptr = hp;
			hp = hp->h_next;
		}
	}

	if(mem == NO_MEM)
		return mem;

	CHECKHOLES;

	if(align_clicks) {
		phys_clicks o;
		o = mem % align_clicks;
		if(o > 0) {
			phys_clicks e;
			e = align_clicks - o;
			FREE_MEM(mem, e);
			mem += e;
		}
	}
	CHECKHOLES;

	return mem;
}
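/* Worked example of the PAF_ALIGN64K path above (illustrative; assumes
 * CLICK_SIZE == 4096): align_clicks is 16, so 16 extra clicks are requested
 * up front. If the block comes back at mem = 0x103 clicks, then
 * o = 0x103 % 16 = 3 and e = 13, so the leading 13 clicks are handed back
 * with FREE_MEM and mem advances to 0x110, a 64K boundary; the caller's
 * original request still fits in what remains of the enlarged block.
 */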
/*===========================================================================*
 *				free_mem_f				     *
 *===========================================================================*/
PUBLIC void free_mem_f(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
 * the block starts in physical memory and how big it is. The block is added
 * to the hole list. If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
	register struct hole *hp, *new_ptr, *prev_ptr;
	CHECKHOLES;

	if (clicks == 0) return;

	if(vm_paged) {
		vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
		free_pages(base, clicks);
		return;
	}

	if ( (new_ptr = free_slots) == NIL_HOLE)
		vm_panic("hole table full", NO_NUM);
	new_ptr->h_base = base;
	new_ptr->h_len = clicks;
	free_slots = new_ptr->h_next;
	hp = hole_head;

	/* If this block's address is numerically less than the lowest hole
	 * currently available, or if no holes are currently available, put
	 * this hole on the front of the hole list.
	 */
	if (hp == NIL_HOLE || base <= hp->h_base) {
		/* Block to be freed goes on front of the hole list. */
		new_ptr->h_next = hp;
		hole_head = new_ptr;
		merge(new_ptr);
		CHECKHOLES;
		return;
	}

	/* Block to be returned does not go on front of hole list. */
	prev_ptr = NIL_HOLE;
	while (hp != NIL_HOLE && base > hp->h_base) {
		prev_ptr = hp;
		hp = hp->h_next;
	}

	/* We found where it goes. Insert block after 'prev_ptr'. */
	new_ptr->h_next = prev_ptr->h_next;
	prev_ptr->h_next = new_ptr;
	merge(prev_ptr);	/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
	CHECKHOLES;
}
/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
/* pointer to hole entry just ahead of 'hp' */
register struct hole *prev_ptr;
/* pointer to hole entry to be removed */
register struct hole *hp;
{
/* Remove an entry from the hole list. This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
	if (hp == hole_head)
		hole_head = hp->h_next;
	else
		prev_ptr->h_next = hp->h_next;

	hp->h_next = free_slots;
	hp->h_base = hp->h_len = 0;
	free_slots = hp;
}
/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found. Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends. The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
	register struct hole *next_ptr;

	/* If 'hp' points to the last hole, no merging is possible. If it does
	 * not, try to absorb its successor into it and free the successor's
	 * table entry.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
		del_slot(hp, next_ptr);
	} else {
		hp = next_ptr;
	}

	/* If 'hp' now points to the last hole, return; otherwise, try to
	 * absorb its successor into it.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;
		del_slot(hp, next_ptr);
	}
}
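/* Illustrative example: suppose a freed block created the hole sequence
 * (base 50, len 50), (base 100, len 50), (base 150, len 50) and merge() is
 * called on the first of them. The first test absorbs (100,50) into (50,50),
 * giving (50,100), and the second test then absorbs (150,50) as well,
 * leaving the single coalesced hole (50,150).
 */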
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks)
struct memory *chunks;		/* list of free memory chunks */
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use. Initially, the former
 * list has one entry for each chunk of physical memory, and the second
 * list links together the remaining table slots. As memory becomes more
 * fragmented in the course of time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them. These slots
 * are taken from the list headed by 'free_slots'.
 */
	int i, first = 1;	/* no chunk recorded in mem_low/mem_high yet */
	register struct hole *hp;
	int nodes, largest;

	/* Put all holes on the free list. */
	for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
		hp->h_next = hp + 1;
		hp->h_base = hp->h_len = 0;
	}
	hole[_NR_HOLES-1].h_next = NIL_HOLE;
	hole_head = NIL_HOLE;
	free_slots = &hole[0];

	addr_init(&addravl);

	/* Use the chunks of physical memory to allocate holes. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			FREE_MEM(chunks[i].base, chunks[i].size);
			first = 0;
		}
	}

	CHECKHOLES;
}
#if SANITYCHECKS
PRIVATE void sanitycheck(void)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		vm_assert(p->size > 0);
		if(prevp) {
			/* Ranges must be sorted and coalesced; the strict '<'
			 * also rejects adjacent, unmerged ranges.
			 */
			vm_assert(prevp->addr < p->addr);
			vm_assert(prevp->addr + prevp->size < p->addr);
		}
		prevp = p;
		addr_incr_iter(&iter);
	}
}
#endif
PUBLIC void memstats(int *nodes, int *pages, int *largest)
{
	pagerange_t *p;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	*nodes = 0;
	*pages = 0;
	*largest = 0;
#if SANITYCHECKS
	sanitycheck();
#endif
	while((p=addr_get_iter(&iter))) {
		SLABSANE(p);
		(*nodes)++;
		(*pages)+= p->size;
		if(p->size > *largest)
			*largest = p->size;
		addr_incr_iter(&iter);
	}
}
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
PRIVATE phys_bytes alloc_pages(int pages, int memflags)
{
	addr_iter iter;
	pagerange_t *pr;
	int incr;
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1  =  1 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes mem;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages;
	int largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();
	wantnodes = firstnodes;
	wantpages = firstpages - pages;
#endif

	if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB)) {
		addr_start_iter_least(&addravl, &iter);
		incr = 1;
	} else {
		addr_start_iter_greatest(&addravl, &iter);
		incr = 0;
	}

	while((pr = addr_get_iter(&iter))) {
		SLABSANE(pr);
		if(pr->size >= pages) {
			if(memflags & PAF_LOWER16MB) {
				if(pr->addr + pages > boundary16)
					return NO_MEM;
			}

			if(memflags & PAF_LOWER1MB) {
				if(pr->addr + pages > boundary1)
					return NO_MEM;
			}

			/* good block found! */
			break;
		}
		if(incr)
			addr_incr_iter(&iter);
		else
			addr_decr_iter(&iter);
	}

	if(!pr) {
		printf("VM: alloc_pages: alloc failed of %d pages\n", pages);
		util_stacktrace();
		printmemstats();
#if SANITYCHECKS
		if(largest >= pages) {
			vm_panic("no memory but largest was enough", NO_NUM);
		}
#endif
		return NO_MEM;
	}

	SLABSANE(pr);

	/* Allocated chunk is off the end. */
	mem = pr->addr + pr->size - pages;

	vm_assert(pr->size >= pages);
	if(pr->size == pages) {
		pagerange_t *prr;
		prr = addr_remove(&addravl, pr->addr);
		vm_assert(prr);
		vm_assert(prr == pr);
		SLABFREE(pr);
#if SANITYCHECKS
		wantnodes--;
#endif
	} else {
		USE(pr, pr->size -= pages;);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s= sys_memset(0, VM_PAGE_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			vm_panic("alloc_pages: sys_memset failed", s);
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	vm_assert(finalnodes == wantnodes);
	vm_assert(finalpages == wantpages);
#endif

	return mem;
}
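/* Design note (an inference, not stated in the original): unconstrained
 * allocations walk the AVL tree from the greatest address downward and carve
 * pages off the top of the chosen range, which tends to preserve low
 * physical memory for later PAF_LOWER16MB/PAF_LOWER1MB (ISA-DMA-style)
 * requests that can only be satisfied there.
 */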
/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
	pagerange_t *pr, *p;
	addr_iter iter;
#if SANITYCHECKS
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages, largest;

	memstats(&firstnodes, &firstpages, &largest);
	sanitycheck();

	wantnodes = firstnodes;
	wantpages = firstpages + npages;
#endif

	vm_assert(!addr_search(&addravl, pageno, AVL_EQUAL));

	/* try to merge with higher neighbour */
	if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
		USE(pr, pr->addr -= npages;
			pr->size += npages;);
	} else {
		if(!SLABALLOC(pr))
			vm_panic("free_pages: can't alloc", NO_NUM);
#if SANITYCHECKS
		memstats(&firstnodes, &firstpages, &largest);

		wantnodes = firstnodes;
		wantpages = firstpages + npages;

		sanitycheck();
#endif
		vm_assert(npages > 0);
		USE(pr, pr->addr = pageno;
			pr->size = npages;);
		addr_insert(&addravl, pr);
#if SANITYCHECKS
		wantnodes++;
#endif
	}

	addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
	p = addr_get_iter(&iter);
	vm_assert(p);
	vm_assert(p == pr);

	addr_decr_iter(&iter);
	if((p = addr_get_iter(&iter))) {
		SLABSANE(p);
		if(p->addr + p->size == pr->addr) {
			USE(p, p->size += pr->size;);
			addr_remove(&addravl, pr->addr);
			SLABFREE(pr);
#if SANITYCHECKS
			wantnodes--;
#endif
		}
	}

#if SANITYCHECKS
	memstats(&finalnodes, &finalpages, &largest);
	sanitycheck();

	vm_assert(finalnodes == wantnodes);
	vm_assert(finalpages == wantpages);
#endif
}
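/* Illustrative example: freeing pages 20..29 while ranges (addr 10, size 10)
 * and (addr 30, size 5) are already free first extends (30,5) downward into
 * (20,15) via the higher-neighbour search, after which the iterator step
 * finds (10,10) directly below and folds everything into one range (10,25).
 */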
#define NR_DMA	16

PRIVATE struct dmatab
{
	int dt_flags;
	endpoint_t dt_proc;
	phys_bytes dt_base;
	phys_bytes dt_size;
	phys_clicks dt_seg_base;
	phys_clicks dt_seg_size;
} dmatab[NR_DMA];

#define DTF_INUSE	1
#define DTF_RELEASE_DMA	2
#define DTF_RELEASE_SEG	4
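/* Lifecycle, as far as this file shows it: do_adddma() marks a slot
 * DTF_INUSE for a target process; do_deldma() clears it again, releasing the
 * underlying segment once the last DTF_RELEASE_SEG slot of that process goes
 * away; and do_getdma() reports one DTF_RELEASE_DMA range at a time so the
 * caller can reclaim them.
 */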
/*===========================================================================*
 *				do_adddma				     *
 *===========================================================================*/
PUBLIC int do_adddma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	req_proc_e= msg->VMAD_REQ;
	target_proc_e= msg->VMAD_EP;
	base= msg->VMAD_START;
	size= msg->VMAD_SIZE;

	/* Find empty slot */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			break;
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_adddma: dma table full\n");
		for (i= 0; i<NR_DMA; i++)
		{
			printf("%d: flags 0x%x proc %d base 0x%x size 0x%x\n",
				i, dmatab[i].dt_flags,
				dmatab[i].dt_proc,
				dmatab[i].dt_base,
				dmatab[i].dt_size);
		}
		vm_panic("adddma: table full", NO_NUM);
		return ENOSPC;
	}

	/* Find target process */
	if (vm_isokendpt(target_proc_e, &proc_n) != OK)
	{
		printf("vm:do_adddma: endpoint %d not found\n", target_proc_e);
		return EINVAL;
	}
	vmp= &vmproc[proc_n];
	vmp->vm_flags |= VMF_HAS_DMA;

	dmatab[i].dt_flags= DTF_INUSE;
	dmatab[i].dt_proc= target_proc_e;
	dmatab[i].dt_base= base;
	dmatab[i].dt_size= size;

	return OK;
}
/*===========================================================================*
 *				do_deldma				     *
 *===========================================================================*/
PUBLIC int do_deldma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, j, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	req_proc_e= msg->VMDD_REQ;
	target_proc_e= msg->VMDD_EP;
	base= msg->VMDD_START;
	size= msg->VMDD_SIZE;

	/* Find slot */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc == target_proc_e &&
			dmatab[i].dt_base == base &&
			dmatab[i].dt_size == size)
		{
			break;
		}
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_deldma: slot not found\n");
		return ESRCH;
	}

	if (dmatab[i].dt_flags & DTF_RELEASE_SEG)
	{
		/* Check if we have to release the segment */
		for (j= 0; j<NR_DMA; j++)
		{
			if (j == i)
				continue;
			if (!(dmatab[j].dt_flags & DTF_INUSE))
				continue;
			if (!(dmatab[j].dt_flags & DTF_RELEASE_SEG))
				continue;
			if (dmatab[j].dt_proc == target_proc_e)
				break;
		}
		if (j >= NR_DMA)
		{
			/* Last segment */
			FREE_MEM(dmatab[i].dt_seg_base,
				dmatab[i].dt_seg_size);
		}
	}

	dmatab[i].dt_flags &= ~DTF_INUSE;

	return OK;
}
/*===========================================================================*
 *				do_getdma				     *
 *===========================================================================*/
PUBLIC int do_getdma(message *msg)
{
	endpoint_t target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	/* Find slot to report */
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (!(dmatab[i].dt_flags & DTF_RELEASE_DMA))
			continue;

		printf("do_getdma: setting reply to 0x%x@0x%x proc %d\n",
			dmatab[i].dt_size, dmatab[i].dt_base,
			dmatab[i].dt_proc);
		msg->VMGD_PROCP= dmatab[i].dt_proc;
		msg->VMGD_BASEP= dmatab[i].dt_base;
		msg->VMGD_SIZEP= dmatab[i].dt_size;

		return OK;
	}

	/* Nothing */
	return EAGAIN;
}
/*===========================================================================*
 *				release_dma				     *
 *===========================================================================*/
PUBLIC void release_dma(struct vmproc *vmp)
{
	int i, found_one;

	vm_panic("release_dma not done", NO_NUM);
#if 0
	/* Disabled draft: 'base', 'size' and 'msg' are not defined in this
	 * function, so this block cannot compile as-is.
	 */
	found_one= FALSE;
	for (i= 0; i<NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc != vmp->vm_endpoint)
			continue;
		dmatab[i].dt_flags |= DTF_RELEASE_DMA | DTF_RELEASE_SEG;
		dmatab[i].dt_seg_base= base;
		dmatab[i].dt_seg_size= size;
		found_one= TRUE;
	}

	if (!found_one)
		FREE_MEM(base, size);

	msg->VMRD_FOUND = found_one;
#endif

	return;
}
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;
	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%ukB) free, largest %d pages (%ukB)\n",
		nodes, pages, (u32_t) pages * (VM_PAGE_SIZE/1024),
		largest, (u32_t) largest * (VM_PAGE_SIZE/1024));
}
#if SANITYCHECKS

/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}

/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	if(!incheck)
		return OK;

	vm_assert(!(addr % VM_PAGE_SIZE));
	vm_assert(!(len % VM_PAGE_SIZE));
	vm_assert(len > 0);
	vm_assert_range(addr, len);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		vm_assert(pagestart > 0);
		vm_assert(pagestart < MAXPAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(GET_BIT(pagemap, pagestart)) {
			printf("%s:%d: usedpages_add: addr 0x%lx reused.\n",
				file, line, thisaddr);
			return EFAULT;
		}
		SET_BIT(pagemap, pagestart);
		pages--;
		pagestart++;
	}

	return OK;
}

#endif