/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory on behalf of the FORK and EXEC system calls. The key data
 * structure used is the hole table, which maintains a list of holes in memory.
 * It is kept sorted in order of increasing memory address. The addresses
 * it contains refer to physical memory, starting at absolute address 0
 * (i.e., they are not relative to the start of PM). During system
 * initialization, the parts of memory containing the interrupt vectors,
 * kernel, and PM are "allocated" to mark them as not available and to
 * remove them from the hole list.
 *
 * The entry points into this file are:
 *   alloc_mem: allocate a given sized chunk of memory
 *   free_mem:  release a previously allocated chunk of memory
 *   mem_init:  initialize the tables when PM starts up
 */
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include "pagerange.h"
#include "sanitycheck.h"
/* AVL tree of free pages. */
PRIVATE addr_avl addravl;

/* Used for sanity check. */
PRIVATE phys_bytes mem_low, mem_high;

#define vm_assert_range(addr, len)  \
	vm_assert((addr) >= mem_low); \
	vm_assert((addr) + (len) - 1 <= mem_high);
struct hole {
	struct hole *h_next;	/* pointer to next entry on the list */
	phys_clicks h_base;	/* where does the hole begin? */
	phys_clicks h_len;	/* how big is the hole? */
	int freelist;		/* sanity check: linked on free_slots? */
	int holelist;		/* sanity check: linked on hole_head? */
};
static int startpages;
#define NIL_HOLE (struct hole *) 0

#define _NR_HOLES (_NR_PROCS*2)	/* No. of memory holes maintained by VM */
PRIVATE struct hole hole[_NR_HOLES];

PRIVATE struct hole *hole_head;		/* pointer to first hole */
PRIVATE struct hole *free_slots;	/* ptr to list of unused table slots */
FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags) );
FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
#define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)

#define MAXPAGES (1024*1024*1024/VM_PAGE_SIZE)	/* 1GB of memory */
#define CHUNKS BITMAP_CHUNKS(MAXPAGES)
PRIVATE bitchunk_t pagemap[CHUNKS];
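
/* Worked example (illustrative): with 4kB pages, MAXPAGES is
 * 2^30 / 2^12 = 262144; with 32-bit bitmap chunks, CHUNKS is
 * 262144 / 32 = 8192, so the pagemap costs 32kB of static storage.
 */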
/* Sanity check for parameters of node p. */
#define vm_assert_params(p, bytes, next) { \
	vm_assert((p) != NO_MEM); \
	vm_assert(!((bytes) % VM_PAGE_SIZE)); \
	vm_assert(!((next) % VM_PAGE_SIZE)); \
	vm_assert((bytes) > 0); \
	vm_assert((p) + (bytes) > (p)); \
	vm_assert((next) == NO_MEM || ((p) + (bytes) <= (next))); \
	vm_assert_range((p), (bytes)); \
	vm_assert_range((next), 1); \
}
/* Retrieve size of free block and pointer to next block from physical
 * address 'p'.
 */
#define GET_PARAMS(p, bytes, next) { \
	phys_readaddr((p), &(bytes), &(next)); \
	vm_assert_params((p), (bytes), (next)); \
}

/* Write parameters to physical page p. */
#define SET_PARAMS(p, bytes, next) { \
	vm_assert_params((p), (bytes), (next)); \
	phys_writeaddr((p), (bytes), (next)); \
}
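
/* Sketch (illustrative, never compiled): a free block keeps its own
 * metadata inside the free page itself, so walking the free list means
 * reading each page's header; 'head' is a hypothetical list head address.
 */
#if 0
	phys_bytes head, bytes, next;
	head = first_free_page;			/* hypothetical */
	GET_PARAMS(head, bytes, next);		/* read block size + successor */
	SET_PARAMS(head, bytes, next);		/* write the header back */
#endif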
/*===========================================================================*
 *				holes_sanity_f				     *
 *===========================================================================*/
PRIVATE void holes_sanity_f(file, line)
char *file;
int line;
{
#define myassert(c) { \
	if(!(c)) { \
		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
		vm_panic("assert failed.", NO_NUM); } \
	}

	int h, c = 0, n = 0;
	struct hole *hp;
	/* Clear the list-membership marks on all slots. */
	for(h = 0; h < _NR_HOLES; h++) {
		hole[h].freelist = 0;
		hole[h].holelist = 0;
	}
	/* Mark all holes on freelist. */
	c = 0;
	for(hp = free_slots; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->freelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}
	/* Mark all holes on holelist. */
	c = 0;
	for(hp = hole_head; hp; hp = hp->h_next) {
		myassert(!hp->freelist);
		myassert(!hp->holelist);
		hp->holelist = 1;
		myassert(c < _NR_HOLES);
		c++;
		n++;
	}
	/* Check there are exactly the right number of nodes. */
	myassert(n == _NR_HOLES);
	/* Make sure each slot is on exactly one of the lists. */
	c = 0;
	for(h = 0; h < _NR_HOLES; h++) {
		hp = &hole[h];
		myassert(hp->holelist || hp->freelist);
		myassert(!(hp->holelist && hp->freelist));
		myassert(c < _NR_HOLES);
		c++;
	}
	/* Make sure no holes overlap. */
	for(hp = hole_head; hp && hp->h_next; hp = hp->h_next) {
		myassert(hp->holelist);
		/* No holes overlap. */
		myassert(hp->h_base + hp->h_len <= hp->h_next->h_base);
		/* No uncoalesced holes. */
		myassert(hp->h_base + hp->h_len < hp->h_next->h_base);
	}
}
/*===========================================================================*
 *				alloc_mem_f				     *
 *===========================================================================*/
PUBLIC phys_clicks alloc_mem_f(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory from the free list using first fit. The block
 * consists of a sequence of contiguous bytes, whose length in clicks is
 * given by 'clicks'. The base of the block (in clicks) is returned. The
 * block is always on a click boundary. This procedure is called when memory
 * is needed for FORK or EXEC.
 */
	register struct hole *hp, *prev_ptr;
	phys_clicks old_base, mem = NO_MEM, align_clicks = 0;
	int s;
	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}
	vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
	mem = alloc_pages(clicks, memflags);
	prev_ptr = NIL_HOLE;
	hp = hole_head;
	while (hp != NIL_HOLE) {
		if (hp->h_len >= clicks) {
			/* We found a hole that is big enough. Use it. */
			old_base = hp->h_base;	/* remember where it started */
			hp->h_base += clicks;	/* bite a piece off */
			hp->h_len -= clicks;	/* ditto */

			/* Delete the hole if used up completely. */
			if (hp->h_len == 0) del_slot(prev_ptr, hp);

			/* Anything special needs to happen? */
			if(memflags & PAF_CLEAR) {
				if ((s = sys_memset(0, CLICK_SIZE*old_base,
					CLICK_SIZE*clicks)) != OK) {
					vm_panic("alloc_mem: sys_memset failed", s);
				}
			}

			/* Return the start address of the acquired block. */
			mem = old_base;
			break;
		}
		prev_ptr = hp;
		hp = hp->h_next;
	}
	if(mem != NO_MEM && align_clicks) {
		phys_clicks o;
		o = mem % align_clicks;
		if(o > 0) {
			phys_clicks e;
			e = align_clicks - o;
			/* Give back the unaligned head so 'mem' starts on
			 * the requested boundary.
			 */
			FREE_MEM(mem, e);
			mem += e;
		}
	}

	return mem;
}
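
/* Usage sketch (illustrative, not part of the build): how a caller might
 * obtain a zeroed, 64kB-aligned block; the 32-click length is hypothetical.
 * With 4kB clicks, PAF_ALIGN64K adds 64k/4k = 16 clicks of slack, and the
 * unaligned head is given back via FREE_MEM above.
 */
#if 0
	phys_clicks base, len = 32;
	base = alloc_mem_f(len, PAF_ALIGN64K | PAF_CLEAR);
	if (base == NO_MEM)
		vm_panic("out of memory", NO_NUM);
	vm_assert(base % ((64 * 1024) / CLICK_SIZE) == 0);
	free_mem_f(base, len);	/* release when no longer needed */
#endif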
/*===========================================================================*
 *				free_mem_f				     *
 *===========================================================================*/
PUBLIC void free_mem_f(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the hole list. The parameters tell where
 * the block starts in physical memory and how big it is. The block is added
 * to the hole list. If it is contiguous with an existing hole on either end,
 * it is merged with the hole or holes.
 */
	register struct hole *hp, *new_ptr, *prev_ptr;

	if (clicks == 0) return;
	vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
	free_pages(base, clicks);
	if ( (new_ptr = free_slots) == NIL_HOLE)
		vm_panic("hole table full", NO_NUM);
	new_ptr->h_base = base;
	new_ptr->h_len = clicks;
	free_slots = new_ptr->h_next;
	hp = hole_head;
	/* If this block's address is numerically less than the lowest hole
	 * currently available, or if no holes are currently available, put
	 * this hole on the front of the hole list.
	 */
	if (hp == NIL_HOLE || base <= hp->h_base) {
		/* Block to be freed goes on front of the hole list. */
		new_ptr->h_next = hp;
		hole_head = new_ptr;
		merge(new_ptr);
		CHECKHOLES;
		return;
	}
	/* Block to be returned does not go on front of hole list. */
	prev_ptr = NIL_HOLE;
	while (hp != NIL_HOLE && base > hp->h_base) {
		prev_ptr = hp;
		hp = hp->h_next;
	}

	/* We found where it goes. Insert block after 'prev_ptr'. */
	new_ptr->h_next = prev_ptr->h_next;
	prev_ptr->h_next = new_ptr;
	merge(prev_ptr);	/* sequence is 'prev_ptr', 'new_ptr', 'hp' */
	CHECKHOLES;
}
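
/* Worked example (illustrative): with holes at clicks 100 (len 50) and 200
 * (len 50), freeing base 150, len 50 inserts a new slot between them;
 * merge() then absorbs both neighbours, leaving one hole of base 100,
 * len 150.
 */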
/*===========================================================================*
 *				del_slot				     *
 *===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
/* pointer to hole entry just ahead of 'hp' */
register struct hole *prev_ptr;
/* pointer to hole entry to be removed */
register struct hole *hp;
{
/* Remove an entry from the hole list. This procedure is called when a
 * request to allocate memory removes a hole in its entirety, thus reducing
 * the number of holes in memory, and requiring the elimination of one
 * entry in the hole list.
 */
	if (hp == hole_head)
		hole_head = hp->h_next;
	else
		prev_ptr->h_next = hp->h_next;

	hp->h_next = free_slots;
	hp->h_base = hp->h_len = 0;
	free_slots = hp;
}
/*===========================================================================*
 *				merge					     *
 *===========================================================================*/
PRIVATE void merge(hp)
register struct hole *hp;	/* ptr to hole to merge with its successors */
{
/* Check for contiguous holes and merge any found. Contiguous holes can occur
 * when a block of memory is freed, and it happens to abut another hole on
 * either or both ends. The pointer 'hp' points to the first of a series of
 * three holes that can potentially all be merged together.
 */
	register struct hole *next_ptr;
	/* If 'hp' points to the last hole, no merging is possible. If it does
	 * not, try to absorb its successor into it and free the successor's
	 * table entry.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;	/* first one gets second one's mem */
		del_slot(hp, next_ptr);
	} else {
		hp = next_ptr;
	}
	/* If 'hp' now points to the last hole, return; otherwise, try to
	 * absorb its successor into it.
	 */
	if ( (next_ptr = hp->h_next) == NIL_HOLE) return;
	if (hp->h_base + hp->h_len == next_ptr->h_base) {
		hp->h_len += next_ptr->h_len;
		del_slot(hp, next_ptr);
	}
}
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
PUBLIC void mem_init(chunks)
struct memory *chunks;	/* list of free memory chunks */
{
/* Initialize hole lists. There are two lists: 'hole_head' points to a linked
 * list of all the holes (unused memory) in the system; 'free_slots' points to
 * a linked list of table entries that are not in use. Initially, the former
 * list has one entry for each chunk of physical memory, and the latter list
 * links together the remaining table slots. As memory becomes more
 * fragmented over time (i.e., the initial big holes break up into
 * smaller holes), new table slots are needed to represent them. These slots
 * are taken from the list headed by 'free_slots'.
 */
	register struct hole *hp;
	int i, first = 1;
	/* Put all holes on the free list. */
	for (hp = &hole[0]; hp < &hole[_NR_HOLES]; hp++) {
		hp->h_next = hp + 1;
		hp->h_base = hp->h_len = 0;
	}
	hole[_NR_HOLES-1].h_next = NIL_HOLE;
	hole_head = NIL_HOLE;
	free_slots = &hole[0];
	/* Use the chunks of physical memory to allocate holes. */
	for (i=NR_MEMS-1; i>=0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			FREE_MEM(chunks[i].base, chunks[i].size);
			first = 0;
		}
	}
}
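
/* Example (illustrative): booted with a single 256MB chunk, mem_init leaves
 * all _NR_HOLES slots on 'free_slots', sets mem_low/mem_high to bracket the
 * chunk, and hands the whole chunk to FREE_MEM as one contiguous free range.
 */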
PRIVATE void sanitycheck(void)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;
	addr_start_iter_least(&addravl, &iter);
	while((p=addr_get_iter(&iter))) {
		vm_assert(p->size > 0);
		if(prevp) {
			vm_assert(prevp->addr < p->addr);
			vm_assert(prevp->addr + prevp->size < p->addr);
		}
		prevp = p;
		addr_incr_iter(&iter);
	}
}
PUBLIC void memstats(int *nodes, int *pages, int *largest)
{
	pagerange_t *p, *prevp = NULL;
	addr_iter iter;

	addr_start_iter_least(&addravl, &iter);
	*nodes = 0;
	*pages = 0;
	*largest = 0;
	while((p=addr_get_iter(&iter))) {
		(*nodes)++;
		(*pages) += p->size;
		if(p->size > *largest)
			*largest = p->size;
		addr_incr_iter(&iter);
	}
}
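
/* Usage sketch (illustrative): callers pass three result pointers, e.g. */
#if 0
	int nodes, pages, largest;
	memstats(&nodes, &pages, &largest);
	printf("%d free ranges, largest %d pages\n", nodes, largest);
#endif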
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
PRIVATE phys_bytes alloc_pages(int pages, int memflags)
{
	addr_iter iter;
	pagerange_t *pr, *prr;
	phys_bytes mem;
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1 = 1 * 1024 * 1024 / VM_PAGE_SIZE;
	int incr;
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages;
	int largest, s;

	memstats(&firstnodes, &firstpages, &largest);
	wantnodes = firstnodes;
	wantpages = firstpages - pages;
	if(memflags & (PAF_LOWER16MB|PAF_LOWER1MB)) {
		addr_start_iter_least(&addravl, &iter);
		incr = 1;
	} else {
		addr_start_iter_greatest(&addravl, &iter);
		incr = 0;
	}
	while((pr = addr_get_iter(&iter))) {
		if(pr->size >= pages) {
			if(memflags & PAF_LOWER16MB) {
				if(pr->addr + pages > boundary16)
					return NO_MEM;
			}
			if(memflags & PAF_LOWER1MB) {
				if(pr->addr + pages > boundary1)
					return NO_MEM;
			}

			/* good block found! */
			break;
		}
		if(incr)
			addr_incr_iter(&iter);
		else
			addr_decr_iter(&iter);
	}
515 printf("VM: alloc_pages: alloc failed of %d pages\n", pages
);
519 if(largest
>= pages
) {
520 vm_panic("no memory but largest was enough", NO_NUM
);
	/* Allocated chunk is off the end. */
	mem = pr->addr + pr->size - pages;

	vm_assert(pr->size >= pages);
	if(pr->size == pages) {
		prr = addr_remove(&addravl, pr->addr);
		vm_assert(prr);
		vm_assert(prr == pr);
		SLABFREE(pr);
		wantnodes--;
	} else {
		USE(pr, pr->size -= pages;);
	}
	if(memflags & PAF_CLEAR) {
		if ((s = sys_memset(0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			vm_panic("alloc_mem: sys_memset failed", s);
	}
	memstats(&finalnodes, &finalpages, &largest);
	vm_assert(finalnodes == wantnodes);
	vm_assert(finalpages == wantpages);

	return mem;
}
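
/* Worked example (illustrative): with 4kB pages, boundary1 is 1MB/4kB = 256
 * and boundary16 is 4096; a PAF_LOWER1MB request for 16 pages is only
 * satisfied from a range with pr->addr + 16 <= 256, i.e. wholly below 1MB.
 */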
/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
PRIVATE void free_pages(phys_bytes pageno, int npages)
{
	pagerange_t *pr, *p;
	addr_iter iter;
	int firstnodes, firstpages, wantnodes, wantpages;
	int finalnodes, finalpages, largest;

	memstats(&firstnodes, &firstpages, &largest);
	wantnodes = firstnodes;
	wantpages = firstpages + npages;
	vm_assert(!addr_search(&addravl, pageno, AVL_EQUAL));
	/* Try to merge with the higher neighbour. */
	if((pr=addr_search(&addravl, pageno+npages, AVL_EQUAL))) {
		USE(pr, pr->addr -= npages;
			pr->size += npages;);
	} else {
		if(!SLABALLOC(pr))
			vm_panic("free_pages: can't alloc", NO_NUM);

		/* Retake the stats; a new tree node was allocated. */
		memstats(&firstnodes, &firstpages, &largest);
		wantnodes = firstnodes;
		wantpages = firstpages + npages;

		vm_assert(npages > 0);
		USE(pr, pr->addr = pageno;
			pr->size = npages;);
		addr_insert(&addravl, pr);
		wantnodes++;
	}
	/* Check if we can merge with the lower neighbour, too. */
	addr_start_iter(&addravl, &iter, pr->addr, AVL_EQUAL);
	p = addr_get_iter(&iter);
	vm_assert(p == pr);

	addr_decr_iter(&iter);
	if((p = addr_get_iter(&iter))) {
		if(p->addr + p->size == pr->addr) {
			USE(p, p->size += pr->size;);
			addr_remove(&addravl, pr->addr);
			SLABFREE(pr);
			wantnodes--;
		}
	}
	memstats(&finalnodes, &finalpages, &largest);
	vm_assert(finalnodes == wantnodes);
	vm_assert(finalpages == wantpages);
}
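
/* Worked example (illustrative): freeing pages 16..31 while the tree holds
 * ranges 0..15 and 32..63 first absorbs the higher range (its node becomes
 * addr 16, size 48); the lower-neighbour pass then folds that into the
 * 0..15 node, leaving a single 64-page range.
 */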
PRIVATE struct dmatab
{
	int dt_flags;
	endpoint_t dt_proc;
	phys_bytes dt_base;
	phys_bytes dt_size;
	phys_clicks dt_seg_base;
	phys_clicks dt_seg_size;
} dmatab[NR_DMA];

#define DTF_INUSE	1
#define DTF_RELEASE_DMA	2
#define DTF_RELEASE_SEG	4
/*===========================================================================*
 *				do_adddma				     *
 *===========================================================================*/
PUBLIC int do_adddma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, proc_n;
	phys_bytes base, size;
	struct vmproc *vmp;

	req_proc_e = msg->VMAD_REQ;
	target_proc_e = msg->VMAD_EP;
	base = msg->VMAD_START;
	size = msg->VMAD_SIZE;
	/* Find empty slot */
	for (i = 0; i < NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			break;
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_adddma: dma table full\n");
		for (i = 0; i < NR_DMA; i++)
		{
			printf("%d: flags 0x%x proc %d base 0x%x size 0x%x\n",
				i, dmatab[i].dt_flags,
				dmatab[i].dt_proc, dmatab[i].dt_base,
				dmatab[i].dt_size);
		}
		vm_panic("adddma: table full", NO_NUM);
	}
	/* Find target process */
	if (vm_isokendpt(target_proc_e, &proc_n) != OK)
	{
		printf("vm:do_adddma: endpoint %d not found\n", target_proc_e);
		return EINVAL;
	}
	vmp = &vmproc[proc_n];
	vmp->vm_flags |= VMF_HAS_DMA;
	dmatab[i].dt_flags = DTF_INUSE;
	dmatab[i].dt_proc = target_proc_e;
	dmatab[i].dt_base = base;
	dmatab[i].dt_size = size;

	return OK;
}
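
/* Example (illustrative; field values are hypothetical): the requester
 * fills in the message as
 *	m.VMAD_EP = driver_ep;
 *	m.VMAD_START = 0x100000;
 *	m.VMAD_SIZE = 0x10000;
 * and VM records the range in dmatab so it can be found again by
 * do_deldma and do_getdma.
 */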
/*===========================================================================*
 *				do_deldma				     *
 *===========================================================================*/
PUBLIC int do_deldma(message *msg)
{
	endpoint_t req_proc_e, target_proc_e;
	int i, j;
	phys_bytes base, size;

	req_proc_e = msg->VMDD_REQ;
	target_proc_e = msg->VMDD_EP;
	base = msg->VMDD_START;
	size = msg->VMDD_SIZE;
	/* Find the matching slot. */
	for (i = 0; i < NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc == target_proc_e &&
			dmatab[i].dt_base == base &&
			dmatab[i].dt_size == size)
		{
			break;
		}
	}
	if (i >= NR_DMA)
	{
		printf("vm:do_deldma: slot not found\n");
		return ESRCH;
	}
	if (dmatab[i].dt_flags & DTF_RELEASE_SEG)
	{
		/* Check if we have to release the segment */
		for (j = 0; j < NR_DMA; j++)
		{
			if (j == i)
				continue;
			if (!(dmatab[j].dt_flags & DTF_INUSE))
				continue;
			if (!(dmatab[j].dt_flags & DTF_RELEASE_SEG))
				continue;
			if (dmatab[j].dt_proc == target_proc_e)
				break;
		}
		if (j >= NR_DMA)
		{
			/* Last segment for this process; release it. */
			FREE_MEM(dmatab[i].dt_seg_base,
				dmatab[i].dt_seg_size);
		}
	}

	dmatab[i].dt_flags &= ~DTF_INUSE;

	return OK;
}
/*===========================================================================*
 *				do_getdma				     *
 *===========================================================================*/
PUBLIC int do_getdma(message *msg)
{
	endpoint_t target_proc_e;
	int i;
	phys_bytes base, size;
	/* Find slot to report */
	for (i = 0; i < NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (!(dmatab[i].dt_flags & DTF_RELEASE_DMA))
			continue;

		printf("do_getdma: setting reply to 0x%x@0x%x proc %d\n",
			dmatab[i].dt_size, dmatab[i].dt_base,
			dmatab[i].dt_proc);
		msg->VMGD_PROCP = dmatab[i].dt_proc;
		msg->VMGD_BASEP = dmatab[i].dt_base;
		msg->VMGD_SIZEP = dmatab[i].dt_size;

		return OK;
	}

	/* Nothing to report. */
	return EAGAIN;
}
/*===========================================================================*
 *				release_dma				     *
 *===========================================================================*/
PUBLIC void release_dma(struct vmproc *vmp)
{
	int i, found_one = FALSE;

	vm_panic("release_dma not done", NO_NUM);

	/* The code below is disabled legacy: it predates the current
	 * interface ('base', 'size' and 'msg' are not in scope here) and is
	 * unreachable until release_dma is implemented.
	 */
#if 0
	for (i = 0; i < NR_DMA; i++)
	{
		if (!(dmatab[i].dt_flags & DTF_INUSE))
			continue;
		if (dmatab[i].dt_proc != vmp->vm_endpoint)
			continue;
		dmatab[i].dt_flags |= DTF_RELEASE_DMA | DTF_RELEASE_SEG;
		dmatab[i].dt_seg_base = base;
		dmatab[i].dt_seg_size = size;
		found_one = TRUE;
	}

	FREE_MEM(base, size);

	msg->VMRD_FOUND = found_one;
#endif
}
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;

	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%ukB) free, largest %d pages (%ukB)\n",
		nodes, pages, (u32_t) pages * (VM_PAGE_SIZE/1024),
		largest, (u32_t) largest * (VM_PAGE_SIZE/1024));
}
/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}
/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
{
	u32_t pagestart, pages;

	vm_assert(!(addr % VM_PAGE_SIZE));
	vm_assert(!(len % VM_PAGE_SIZE));
	vm_assert_range(addr, len);

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		vm_assert(pagestart > 0);
		vm_assert(pagestart < MAXPAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(GET_BIT(pagemap, pagestart)) {
			printf("%s:%d: usedpages_add: addr 0x%lx reused.\n",
				file, line, thisaddr);
			return EFAULT;
		}
		SET_BIT(pagemap, pagestart);
		pages--;
		pagestart++;
	}

	return OK;
}
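
/* Usage sketch (illustrative): a sanity sweep resets the map and then has
 * every owner of physical memory report its ranges; a non-OK return means
 * two owners claimed the same page. The address used here is hypothetical.
 */
#if 0
	usedpages_reset();
	if(usedpages_add_f(0x100000, 4*VM_PAGE_SIZE, __FILE__, __LINE__) != OK)
		vm_panic("physical page claimed twice", NO_NUM);
#endif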