/* This file is concerned with allocating and freeing arbitrary-size blocks of
 * physical memory.
 */
#include <minix/callnr.h>
#include <minix/type.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/debug.h>
#include <minix/bitmap.h>

#include <string.h>
#include <assert.h>
#include <errno.h>

#include "sanitycheck.h"
/* Number of physical pages in a 32-bit address space */
#define NUMBER_PHYSICAL_PAGES (int)(0x100000000ULL/VM_PAGE_SIZE)
#define PAGE_BITMAP_CHUNKS BITMAP_CHUNKS(NUMBER_PHYSICAL_PAGES)
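/* With the usual 4 KiB VM_PAGE_SIZE this works out to 0x100000000 / 0x1000 =
 * 1,048,576 page bits, i.e. 32,768 32-bit bitchunks (128 KiB of bitmap).
 */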
static bitchunk_t free_pages_bitmap[PAGE_BITMAP_CHUNKS];
#define PAGE_CACHE_MAX 10000
static int free_page_cache[PAGE_CACHE_MAX];
static int free_page_cache_size = 0;
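/* One bit per physical page frame: a set bit means the page is free. The
 * page cache is a small stack of recently freed page numbers, so
 * single-page allocations can usually be satisfied without scanning the
 * bitmap.
 */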
/* Used for sanity check. */
static phys_bytes mem_low, mem_high;

static void free_pages(phys_bytes addr, int pages);
static phys_bytes alloc_pages(int pages, int flags);
/* Per-page ownership record for the sanity checks below. */
static struct {
	int used;
	const char *file;
	int line;
} pagemap[NUMBER_PHYSICAL_PAGES];
#define page_isfree(i) GET_BIT(free_pages_bitmap, i)

#define RESERVEDMAGIC 0x6e4c74d5
#define MAXRESERVEDPAGES 300
#define MAXRESERVEDQUEUES 15
static struct reserved_pages {
	struct reserved_pages *next;	/* next in use */
	int max_available;	/* queue depth; 0 if slot not in use at all */
	int npages;		/* number of consecutive pages */
	int mappedin;		/* must reserved pages also be mapped? */
	int n_available;	/* number of queue entries */
	int allocflags;		/* allocflags for alloc_mem */
	struct reserved_pageslot {
		phys_bytes phys;	/* physical address of the spare pages */
		void *vir;		/* mapped-in address; NULL if unmapped */
	} slots[MAXRESERVEDPAGES];
	u32_t magic;		/* RESERVEDMAGIC while in use */
} reservedqueues[MAXRESERVEDQUEUES], *first_reserved_inuse = NULL;
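/* Reserved queues keep pre-allocated (and optionally pre-mapped) page
 * ranges on hand so that VM can hand out memory at moments when a fresh
 * allocation is not possible or not desirable, e.g. deep inside fault
 * handling. alloc_cycle() tops the queues back up; missing_spares counts
 * how many slots still need refilling across all queues.
 */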
int missing_spares = 0;
static void sanitycheck_queues(void)
{
	struct reserved_pages *mrq;
	int m = 0;

	for(mrq = first_reserved_inuse; mrq; mrq = mrq->next) {
		assert(mrq->max_available > 0);
		assert(mrq->max_available >= mrq->n_available);
		m += mrq->max_available - mrq->n_available;
	}

	assert(m == missing_spares);
}
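/* Invariant checked above: missing_spares always equals the total number of
 * unfilled slots, i.e. the sum of max_available - n_available over all
 * queues in use.
 */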
static void sanitycheck_rq(struct reserved_pages *rq)
{
	assert(rq->magic == RESERVEDMAGIC);
	assert(rq->n_available >= 0);
	assert(rq->n_available <= MAXRESERVEDPAGES);
	assert(rq->n_available <= rq->max_available);

	sanitycheck_queues();
}
void *reservedqueue_new(int max_available, int npages, int mapped, int allocflags)
{
	int r;
	struct reserved_pages *rq;

	assert(max_available > 0);
	assert(max_available < MAXRESERVEDPAGES);

	/* Find an unused queue slot. */
	for(r = 0; r < MAXRESERVEDQUEUES; r++)
		if(!reservedqueues[r].max_available)
			break;

	if(r >= MAXRESERVEDQUEUES) {
		printf("VM: %d reserved queues in use\n", MAXRESERVEDQUEUES);
		return NULL;
	}

	rq = &reservedqueues[r];

	memset(rq, 0, sizeof(*rq));
	rq->next = first_reserved_inuse;
	first_reserved_inuse = rq;

	rq->max_available = max_available;
	rq->npages = npages;
	rq->mappedin = mapped;
	rq->allocflags = allocflags;
	rq->magic = RESERVEDMAGIC;

	missing_spares += max_available;

	return rq;
}
static void
reservedqueue_fillslot(struct reserved_pages *rq,
	struct reserved_pageslot *rps, phys_bytes ph, void *vir)
{
	rps->phys = ph;
	rps->vir = vir;
	assert(missing_spares > 0);
	if(rq->mappedin) assert(vir);
	missing_spares--;
	rq->n_available++;
}
static int
reservedqueue_addslot(struct reserved_pages *rq)
{
	phys_bytes cl, cl_addr;
	void *vir;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
		return ENOMEM;

	cl_addr = CLICK2ABS(cl);

	vir = NULL;

	if(rq->mappedin) {
		if(!(vir = vm_mappages(cl_addr, rq->npages))) {
			free_mem(cl, rq->npages);
			printf("reservedqueue_addslot: vm_mappages failed\n");
			return ENOMEM;
		}
	}

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, cl_addr, vir);

	return OK;
}
void reservedqueue_add(void *rq_v, void *vir, phys_bytes ph)
{
	struct reserved_pages *rq = rq_v;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	rps = &rq->slots[rq->n_available];

	reservedqueue_fillslot(rq, rps, ph, vir);
}
static int reservedqueue_fill(void *rq_v)
{
	struct reserved_pages *rq = rq_v;
	int r;

	sanitycheck_rq(rq);

	while(rq->n_available < rq->max_available)
		if((r = reservedqueue_addslot(rq)) != OK)
			return r;

	return OK;
}
int
reservedqueue_alloc(void *rq_v, phys_bytes *ph, void **vir)
{
	struct reserved_pages *rq = rq_v;
	struct reserved_pageslot *rps;

	sanitycheck_rq(rq);

	if(rq->n_available < 1) return ENOMEM;

	/* Take the last filled slot; it must be refilled later. */
	rq->n_available--;
	missing_spares++;
	rps = &rq->slots[rq->n_available];

	*ph = rps->phys;
	*vir = rps->vir;

	sanitycheck_rq(rq);

	return OK;
}
void alloc_cycle(void)
{
	struct reserved_pages *rq;

	sanitycheck_queues();
	for(rq = first_reserved_inuse; rq && missing_spares > 0; rq = rq->next) {
		sanitycheck_rq(rq);
		reservedqueue_fill(rq);
		sanitycheck_rq(rq);
	}
	sanitycheck_queues();
}
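/* A minimal usage sketch of the reserved-queue API (illustrative only, not
 * part of this file; the names and sizes are hypothetical). A client
 * creates a queue once, then draws from it when allocation must not fail,
 * relying on alloc_cycle() to replenish the queue afterwards:
 */
#if 0
static void *my_rq;

static void my_init(void)
{
	/* 10 spare slots of 1 mapped-in page each, pre-cleared. */
	if(!(my_rq = reservedqueue_new(10, 1, 1, PAF_CLEAR)))
		panic("reservedqueue_new failed");
	alloc_cycle();	/* fill the queue up front */
}

static void *my_get_page(phys_bytes *phys)
{
	void *vir;
	if(reservedqueue_alloc(my_rq, phys, &vir) != OK)
		return NULL;
	return vir;
}
#endif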
/*===========================================================================*
 *				alloc_mem				     *
 *===========================================================================*/
phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
{
/* Allocate a block of memory using first fit. The block consists of a
 * sequence of contiguous bytes, whose length in clicks is given by
 * 'clicks'. A pointer to the block is returned. The block is always on a
 * click boundary. This procedure is called when memory is needed for FORK
 * or EXEC.
 */
	phys_clicks mem = NO_MEM, align_clicks = 0;

	if(memflags & PAF_ALIGN64K) {
		align_clicks = (64 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	} else if(memflags & PAF_ALIGN16K) {
		align_clicks = (16 * 1024) / CLICK_SIZE;
		clicks += align_clicks;
	}

	do {
		mem = alloc_pages(clicks, memflags);
	} while(mem == NO_MEM && cache_freepages(clicks) > 0);

	if(mem == NO_MEM)
		return mem;

	/* Alignment: the request was grown by align_clicks above, so there
	 * is enough slack to free the misaligned head and round up.
	 */
	if(align_clicks) {
		phys_clicks o, e;
		o = mem % align_clicks;
		if(o > 0) {
			e = align_clicks - o;
			free_mem(mem, e);
			mem += e;
		}
	}

	return mem;
}
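/* Example of the alignment fixup above, assuming 4 KiB clicks: a
 * PAF_ALIGN64K request grows by 16 clicks; if the block comes back at
 * click 21, then o = 21 % 16 = 5, so e = 11 clicks are freed at the start
 * and the returned block begins at click 32, a 64 KiB boundary.
 */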
void mem_add_total_pages(int pages)
{
	total_pages += pages;
}
/*===========================================================================*
 *				free_mem				     *
 *===========================================================================*/
void free_mem(phys_clicks base, phys_clicks clicks)
{
/* Return a block of free memory to the allocator. The parameters tell where
 * the block starts in physical memory and how big it is. The pages are
 * marked free in the free-pages bitmap, so contiguity with neighbouring
 * free blocks is implicit.
 */
	if (clicks == 0) return;

	assert(CLICK_SIZE == VM_PAGE_SIZE);
	free_pages(base, clicks);
}
/*===========================================================================*
 *				mem_init				     *
 *===========================================================================*/
void mem_init(struct memory *chunks)
{
/* Initialize the free memory administration. Mark the pages covered by
 * every chunk of physical memory as free, and record the lowest and
 * highest physical addresses seen, which are used for sanity checking.
 */
	int i, first = 1;	/* 'first' marks the first nonempty chunk */

	total_pages = 0;

	memset(free_pages_bitmap, 0, sizeof(free_pages_bitmap));

	/* Use the chunks of physical memory to populate the free bitmap. */
	for (i = NR_MEMS-1; i >= 0; i--) {
		if (chunks[i].size > 0) {
			phys_bytes from = CLICK2ABS(chunks[i].base),
				to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
			if(first || from < mem_low) mem_low = from;
			if(first || to > mem_high) mem_high = to;
			free_mem(chunks[i].base, chunks[i].size);
			total_pages += chunks[i].size;
			first = 0;
		}
	}
}
void mem_sanitycheck(const char *file, int line)
{
	int i;

	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		if(!page_isfree(i)) continue;
		MYASSERT(usedpages_add(i * VM_PAGE_SIZE, VM_PAGE_SIZE) == OK);
	}
}
void memstats(int *nodes, int *pages, int *largest)
{
	int i;

	*nodes = 0;
	*pages = 0;
	*largest = 0;

	/* Count runs of consecutive free pages. */
	for(i = 0; i < NUMBER_PHYSICAL_PAGES; i++) {
		int size = 0;
		while(i < NUMBER_PHYSICAL_PAGES && page_isfree(i)) {
			size++;
			i++;
		}
		if(size == 0) continue;
		(*nodes)++;
		(*pages) += size;
		if(size > *largest)
			*largest = size;
	}
}
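/* findbit() scans the bitmap downwards from 'startscan' looking for a run
 * of 'pages' consecutive free pages at or above 'low'. When it lands on a
 * used page, it skips backwards a whole bitchunk at a time while chunks
 * contain no free page (MAP_CHUNK is zero), which keeps the scan cheap in
 * densely used regions.
 */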
static int findbit(int low, int startscan, int pages, int memflags, int *len)
{
	int run_length = 0, i;
	int freerange_start = startscan;

	for(i = startscan; i >= low; i--) {
		if(!page_isfree(i)) {
			int chunk = i/BITCHUNK_BITS, moved = 0;
			run_length = 0;
			while(chunk > 0 &&
			   !MAP_CHUNK(free_pages_bitmap, chunk*BITCHUNK_BITS)) {
				chunk--;
				moved = 1;
			}
			if(moved) { i = chunk * BITCHUNK_BITS + BITCHUNK_BITS; }
			continue;
		}
		if(!run_length) { freerange_start = i; run_length = 1; }
		else { freerange_start--; run_length++; }
		assert(run_length <= pages);
		if(run_length == pages) {
			/* good block found! */
			*len = run_length;
			return freerange_start;
		}
	}

	return NO_MEM;
}
/*===========================================================================*
 *				alloc_pages				     *
 *===========================================================================*/
static phys_bytes alloc_pages(int pages, int memflags)
{
	phys_bytes boundary16 = 16 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes boundary1  = 1 * 1024 * 1024 / VM_PAGE_SIZE;
	phys_bytes mem = NO_MEM, i;	/* page number */
	int maxpage = NUMBER_PHYSICAL_PAGES - 1;
	static int lastscan = -1;
	int startscan, run_length;

	if(memflags & PAF_LOWER16MB)
		maxpage = boundary16 - 1;
	else if(memflags & PAF_LOWER1MB)
		maxpage = boundary1 - 1;
	else {
		/* no position restrictions: check page cache */
		if(pages == 1) {
			while(free_page_cache_size > 0) {
				i = free_page_cache[free_page_cache_size-1];
				if(page_isfree(i)) {
					free_page_cache_size--;
					mem = i;
					assert(mem != NO_MEM);
					run_length = 1;
					break;
				}
				/* stale entry: page was reallocated since */
				free_page_cache_size--;
			}
		}
	}

	if(mem == NO_MEM) {
		if(lastscan < maxpage && lastscan >= 0)
			startscan = lastscan;
		else	startscan = maxpage;
		mem = findbit(0, startscan, pages, memflags, &run_length);
	}

	/* Not found? Then scan the whole allowed range. */
	if(mem == NO_MEM)
		mem = findbit(0, maxpage, pages, memflags, &run_length);

	if(mem == NO_MEM)
		return NO_MEM;

	/* remember for next time */
	lastscan = mem;

	for(i = mem; i < mem + pages; i++) {
		UNSET_BIT(free_pages_bitmap, i);
	}

	if(memflags & PAF_CLEAR) {
		int s;
		if ((s = sys_memset(NONE, 0, CLICK_SIZE*mem,
			VM_PAGE_SIZE*pages)) != OK)
			panic("alloc_mem: sys_memset failed: %d", s);
	}

	return mem;
}
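/* Scanning heuristic used above: alloc_pages() first resumes from
 * 'lastscan', where the previous allocation ended, and only rescans the
 * full range below 'maxpage' if that fails. PAF_LOWER16MB and PAF_LOWER1MB
 * shrink 'maxpage' for allocations that must be reachable at low physical
 * addresses, typically for legacy DMA or real-mode constraints.
 */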
/*===========================================================================*
 *				free_pages				     *
 *===========================================================================*/
static void free_pages(phys_bytes pageno, int npages)
{
	int i, lim = pageno + npages - 1;

#if JUNKFREE
	/* Fill freed memory with junk to catch use-after-free bugs. */
	if(sys_memset(NONE, 0xa5a5a5a5, VM_PAGE_SIZE * pageno,
		VM_PAGE_SIZE * npages) != OK)
		panic("free_pages: sys_memset failed");
#endif

	for(i = pageno; i <= lim; i++) {
		SET_BIT(free_pages_bitmap, i);
		if(free_page_cache_size < PAGE_CACHE_MAX) {
			free_page_cache[free_page_cache_size++] = i;
		}
	}
}
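/* Freed pages are pushed onto free_page_cache as well as marked in the
 * bitmap, so a later single-page alloc_pages() call can reuse them without
 * a bitmap scan; alloc_pages() re-checks page_isfree() before trusting a
 * cache entry, so stale entries are harmless.
 */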
/*===========================================================================*
 *				printmemstats				     *
 *===========================================================================*/
void printmemstats(void)
{
	int nodes, pages, largest;

	memstats(&nodes, &pages, &largest);
	printf("%d blocks, %d pages (%lukB) free, largest %d pages (%lukB)\n",
		nodes, pages, (unsigned long) pages * (VM_PAGE_SIZE/1024),
		largest, (unsigned long) largest * (VM_PAGE_SIZE/1024));
}
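/* Example output, assuming 4 KiB pages: with 3 free runs totalling 5000
 * pages, of which the largest is 4000 pages, this prints
 * "3 blocks, 5000 pages (20000kB) free, largest 4000 pages (16000kB)".
 */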
/*===========================================================================*
 *				usedpages_reset				     *
 *===========================================================================*/
void usedpages_reset(void)
{
	memset(pagemap, 0, sizeof(pagemap));
}
/*===========================================================================*
 *				usedpages_add				     *
 *===========================================================================*/
int usedpages_add_f(phys_bytes addr, phys_bytes len, const char *file, int line)
{
	u32_t pagestart, pages;

	assert(!(addr % VM_PAGE_SIZE));
	assert(!(len % VM_PAGE_SIZE));

	pagestart = addr / VM_PAGE_SIZE;
	pages = len / VM_PAGE_SIZE;

	while(pages > 0) {
		phys_bytes thisaddr;
		assert(pagestart > 0);
		assert(pagestart < NUMBER_PHYSICAL_PAGES);
		thisaddr = pagestart * VM_PAGE_SIZE;
		if(pagemap[pagestart].used) {
			static int warnings = 0;
			if(warnings++ < 100)
				printf("%s:%d: usedpages_add: addr 0x%lx reused, first %s:%d\n",
					file, line, thisaddr,
					pagemap[pagestart].file,
					pagemap[pagestart].line);
			return EFAULT;
		}
		pagemap[pagestart].used = 1;
		pagemap[pagestart].file = file;
		pagemap[pagestart].line = line;
		pages--;
		pagestart++;
	}

	return OK;
}
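/* Typical sanity-check cycle, as suggested by the functions above:
 * usedpages_reset() clears the map, every known owner of physical memory
 * registers its pages through the usedpages_add() wrapper, and
 * mem_sanitycheck() registers all free pages; any page claimed twice is
 * reported with both claimants' file:line.
 */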