/*
    Copyright © 2010-2017, The AROS Development Team. All rights reserved.

    Desc: Page-based memory allocator, linear algorithm.
*/

#ifndef __KERNEL_NOLIBBASE__
#define __KERNEL_NOLIBBASE__
#endif /* !__KERNEL_NOLIBBASE__ */

#include <exec/alerts.h>
#include <exec/execbase.h>
#include <proto/arossupport.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_mm.h>
#include "mm_linear.h"

#define D(x)    /* Debug output is compiled out by default; change to 'D(x) x' to enable */

/*
 * 'Linear' memory page allocator implementation.
 * The goals of this implementation are simplicity and reduced memory overhead.
 *
 * It is a modified version of the exec.library allocator, which works with variable-length
 * blocks of pages. Instead of lists, it keeps the information about allocated/free pages in
 * a linear memory map, which is kept separate from the data itself. This makes it possible
 * to block all access to unallocated pages. When allocating blocks at arbitrary addresses,
 * the memory space is searched for the best-matching block. MEMF_REVERSE can be used to
 * specify the search direction.
 */

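/*
 * Illustrative example (not part of the original source): the map holds one entry per page.
 * P_STATUS() of an entry gives the page state, P_COUNT() a counter that grows towards the
 * start of a block (saturating at its limit). A region whose first three pages are free and
 * whose next two pages are allocated would thus be described as
 *
 *     map[0] -> (P_FREE,  3)      map[3] -> (P_ALLOC, 2)
 *     map[1] -> (P_FREE,  2)      map[4] -> (P_ALLOC, 1)
 *     map[2] -> (P_FREE,  1)
 *
 * so a walker standing on the first page of a block can skip at least P_COUNT() pages at
 * a time without touching the data pages themselves.
 */
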
/*
 * Change the state of a block of 'pages' pages starting at 'first' to 'state'.
 * Checks the blocks to the left and to the right of our block, merges/splits
 * blocks if necessary, and updates counters.
 */
static void SetBlockState(struct BlockHeader *head, IPTR first, IPTR pages, page_t state)
{
    /* Check state of the block next to our region */
    IPTR p = first + pages;
    page_t cnt = 1;                     /* Initial counter value */

    if (p > head->size)
        /* If we claim we want to access more pages than our BlockHeader has, this is bad */
        Alert(AN_BadFreeAddr);
    else if (p < head->size)
    {
        /*
         * If the block next to our region is in the same state as our
         * block will have, pick up the initial counter value from it. Our block
         * will be merged with the next one.
         */
        if (P_STATUS(head->map[p]) == state)
        {
            cnt = P_COUNT(head->map[p]);
            INC_COUNT(cnt);             /* Bump the counter, saturating at its limit; assumed helper from mm_linear.h */
        }
    }

    /*
     * Set the state of our block. We set state from the last to the first page, and every
     * page adds 1 to the counter (until it hits the limit value).
     */
    do
    {
        head->map[--p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > first);

    /*
     * If our block starts at page 0, there's nothing to check before it.
     * We are done.
     */
    if (p == 0)
        return;

    /*
     * The preceding block can have either the same state as our new state,
     * or a different one.
     * In both cases its counters need updating.
     * - If it has the same state, we keep the current counter value, so the blocks
     *   will merge.
     * - If it has a different state, we restart the counter from 1. This means
     *   we are splitting the block.
     */
    if (P_STATUS(head->map[p-1]) != state)
    {
        cnt = 1;
        state = P_STATUS(head->map[p-1]);
    }

    /*
     * Update the space counter for the preceding block. We keep going until the state
     * changes again. The block keeps its original state.
     */
    do
    {
        if (P_STATUS(head->map[--p]) != state)
            break;

        head->map[p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > 0);
}

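/*
 * Worked example (illustrative, not from the original source): suppose pages 0-4 form one
 * free block, so the map counters read, counting down towards the end of the block:
 *
 *     5 4 3 2 1
 *
 * SetBlockState(head, 2, 2, P_ALLOC) first writes the new block backwards from its last
 * page (page 4 stays free, so the counter restarts at 1), then renumbers the preceding
 * free block, splitting it. The map becomes:
 *
 *     2 1 | 2 1 | 1      (free block of 2, allocated block of 2, free block of 1)
 */
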
/* Allocate 'size' bytes from MemHeader mh */
APTR mm_Allocate(struct MemHeader *mh, IPTR size, ULONG flags)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    APTR addr = NULL;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR p;
    IPTR candidate, candidate_size;

    D(bug("[mm_Allocate] Request for %u bytes from BlockHeader %p, KernelBase 0x%p\n", size, head, KernelBase));

    /*
     * Safety checks.
     * If mh_First is NULL, this is a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;
    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Start looking up from page zero */
    p = 0;
    candidate = 0;
    candidate_size = -1;

    /*
     * Look up the best matching free block.
     * We walk through the whole memory map in order to identify free blocks
     * and get their sizes. We use the best-match criteria in order to avoid
     * excessive memory fragmentation.
     */
    do
    {
        IPTR start = p;         /* Starting page of the block being examined */
        IPTR free  = 0;         /* Count of free pages in the block          */

        while (P_STATUS(head->map[p]) == P_FREE)
        {
            UBYTE cnt = P_COUNT(head->map[p]);  /* Get (partial) block length */

            free += cnt;        /* Add length to total count */
            p    += cnt;        /* Advance past the block    */

            if (p == head->size)        /* Reached the end of this memory chunk? */
                break;
            if (p > head->size)         /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Have %u free pages starting from %u\n", free, start));

        /* Does the block fit? */
        if (free >= pages)
        {
            if (flags & MEMF_REVERSE)
            {
                /*
                 * If MEMF_REVERSE is set, we remember the new candidate if its size is less
                 * than or equal to the current one. This is effectively the same as best-match
                 * lookup starting from the end of the region.
                 */
                if (free <= candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }
            else
            {
                /*
                 * If the found block is smaller than the
                 * previous candidate, remember it as a new candidate.
                 */
                if (free < candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }

            /* If we found an exact match, we can't do better, so stop searching */
            if (free == pages)
            {
                D(bug("[mm_Allocate] Exact match\n"));
                break;
            }
        }

        /*
         * If we are at the end of the memory map, we have nothing
         * more to look at. We either already have a candidate,
         * or there is none at all.
         */
        if (p == head->size)
        {
            D(bug("[mm_Allocate] Reached end of chunk\n"));
            break;
        }

        D(bug("[mm_Allocate] Allocated block starts at %u\n", p));
        /* Skip past the end of the allocated block */
        while (P_STATUS(head->map[p]) == P_ALLOC)
        {
            p += P_COUNT(head->map[p]);

            if (p == head->size)        /* Reached the end of this memory chunk? */
            {
                D(bug("[mm_Allocate] Reached end of chunk\n"));
                break;
            }
            if (p > head->size)         /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Skipped up to page %u\n", p));

    } while (p < head->size);

    /* Found a suitable block? */
    if (candidate_size != -1)
    {
        /* Mark the block as allocated */
        D(bug("[mm_Allocate] Allocating %u pages starting from %u\n", pages, candidate));
        SetBlockState(head, candidate, pages, P_ALLOC);

        /* Calculate the starting address of the first page */
        addr = head->start + candidate * head->pageSize;
        /* Update the free memory counter */
        mh->mh_Free -= size;
    }

    ReleaseSemaphore(&head->sem);

    D(bug("[mm_Allocate] Allocated at address 0x%p\n", addr));

    return addr;
}

/* Allocate 'size' bytes starting at 'addr' from MemHeader mh */
APTR mm_AllocAbs(struct MemHeader *mh, void *addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR start, p;
    void *ret = NULL;

    D(bug("[mm_AllocAbs] Request for %u bytes from BlockHeader %p\n", size, head));

    /*
     * Safety checks.
     * If mh_First is NULL, this is a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;
    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Align the starting address */
    addr = (void *)((IPTR)addr & ~align);

    /* The requested address may fall inside our administrative area. We can't satisfy such a request */
    if (addr < head->start)
        return NULL;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Get the starting page number */
    start = (addr - head->start) / head->pageSize;

    /* Check if we have enough free pages starting from the first one */
    p = start;
    while (P_STATUS(head->map[p]) == P_FREE)
    {
        p += P_COUNT(head->map[p]);     /* Advance past the block */
        if (p >= start + pages)         /* Counted enough free pages? */
        {
            /* Allocate the block and exit */
            SetBlockState(head, start, pages, P_ALLOC);
            ret = addr;
            break;
        }

        if (p == head->size)            /* Reached the end of this memory chunk? */
            break;
        if (p > head->size)             /* Went past the chunk? This must never happen! */
            Alert(AN_MemCorrupt);
    }

    ReleaseSemaphore(&head->sem);

    return ret;
}

/* Free 'size' bytes starting from address 'addr' in the MemHeader mh */
void mm_Free(struct MemHeader *mh, APTR addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    /* Calculate the number of the starting page within the region */
    IPTR first = (addr - head->start) / head->pageSize;
    IPTR align = head->pageSize - 1;
    IPTR pages;

    /* Pad up the size and convert it to a number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Set the block state to free */
    SetBlockState(head, first, pages, P_FREE);

    /* Update the free memory counter */
    mh->mh_Free += size;

    ReleaseSemaphore(&head->sem);
}

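/*
 * Illustrative usage sketch (not part of the original source, hence disabled): how a caller
 * holding a MemHeader managed by this allocator might pair mm_Allocate() and mm_Free().
 * The MemHeader pointer and the use of plain MEMF_ANY are assumptions of the example.
 */
#if 0
static void mm_linear_example(struct MemHeader *mh)
{
    /* The request is padded up to a whole number of pages internally */
    APTR buf = mm_Allocate(mh, 3000, MEMF_ANY);

    if (buf)
    {
        /* ... use the page-aligned buffer ... */

        /* The same size must be passed back; it is padded the same way on free */
        mm_Free(mh, buf, 3000);
    }
}
#endif
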
/*
 * Initialize memory management in a given MemHeader.
 * This routine takes into account only mh_Lower and mh_Upper; the rest is
 * ignored and overwritten.
 *
 * TODO: Currently it's assumed that the MemHeader itself is placed at the beginning
 * of the region. In the future this may not be true.
 */
void mm_Init(struct MemHeader *mh, ULONG pageSize)
{
    struct BlockHeader *head;
    IPTR align;
    APTR end;
    IPTR memsize;
    IPTR mapsize;
    IPTR p;
    page_t free;

    /*
     * Currently we assume the struct MemHeader to be at the beginning
     * of our region.
     */
    head  = (APTR)mh + MEMHEADER_TOTAL;
    align = pageSize - 1;

    /* Fill in the BlockHeader */
    head->mc.mc_Next  = NULL;
    head->mc.mc_Bytes = 0;
    head->pageSize    = pageSize;

    /*
     * Page-align the boundaries.
     * We intentionally make 'start' point into the previous page,
     * we'll jump to the next page later, in the loop.
     */
    head->start = (APTR)((IPTR)head->map & ~align);
    end         = (APTR)(((IPTR)mh->mh_Upper + 1) & ~align);

    D(bug("[mm_Init] MemHeader 0x%p, BlockHeader 0x%p, usable 0x%p - 0x%p\n", mh, head, head->start, end));

    do
    {
        /* Skip one page. This reserves some space (one page or less) for the allocations map. */
        head->start += pageSize;
        /* Calculate the resulting map size */
        mapsize = (head->start - (APTR)head->map) / sizeof(ULONG);
        /* Calculate the number of free bytes and pages */
        memsize = end - head->start;
        head->size = memsize / pageSize;
        /*
         * Repeat the operation if there's not enough room for the allocations map.
         * This will take one more page from the area and use it for the map.
         */
    } while (mapsize < head->size);

    D(bug("[mm_Init] Got %u usable pages\n", head->size));

    /* Mark all pages as one free block, building up the counters from the last page backwards */
    p = head->size;
    free = P_FREE | 1;
    do
    {
        head->map[--p] = free;
        INC_COUNT(free);
    } while (p > 0);

    /* Set the BlockHeader pointer and free space counter */
    mh->mh_First = &head->mc;
    mh->mh_Free  = memsize;
}

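/*
 * Illustrative sketch (not part of the original source, hence disabled): how a raw memory
 * region might be handed to mm_Init(). The region bounds and page size are assumptions of
 * the example; only mh_Lower and mh_Upper are taken into account, the rest is filled in.
 */
#if 0
static struct MemHeader *mm_linear_example_init(APTR lower, APTR upper, ULONG pageSize)
{
    /* The MemHeader is expected to sit at the very beginning of the region (see TODO above) */
    struct MemHeader *mh = lower;

    mh->mh_Lower = lower;
    mh->mh_Upper = upper;       /* Last byte of the region; mm_Init() adds 1 to it */

    mm_Init(mh, pageSize);      /* Builds the BlockHeader, the page map and mh_Free */

    return mh;
}
#endif
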
/*
 * Apply memory protection to the MemHeader.
 * This is a separate routine because the MMU itself requires some memory to place its
 * control structures, and they need to be allocated using an already working allocator.
 * Only once the MMU is up and running can we apply protection to our memory.
 */
void mm_Protect(struct MemHeader *mh, struct KernelBase *KernelBase)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;

    InitSemaphore(&head->sem);
}

/*
 * Helpers for gathering statistics. They update the target only if the corresponding tag
 * was supplied; a zero value in the 'smallest' target is treated as 'not set yet'.
 */
#define SET_LARGEST(ptr, val)  do { if ((ptr) && (val) > *(ptr)) *(ptr) = (val); } while (0)
#define SET_SMALLEST(ptr, val) do { if ((ptr) && (!*(ptr) || (val) < *(ptr))) *(ptr) = (val); } while (0)

/* Get statistics from the specified MemHeader */
void mm_StatMemHeader(struct MemHeader *mh, const struct TagItem *query, struct KernelBase *KernelBase)
{
    struct TagItem *tag, *tstate = (struct TagItem *)query;
    IPTR *largest_alloc  = NULL;
    IPTR *smallest_alloc = NULL;
    IPTR *largest_free   = NULL;
    IPTR *smallest_free  = NULL;
    IPTR *num_alloc      = NULL;
    IPTR *num_free       = NULL;
    BOOL do_traverse = FALSE;

    while ((tag = LibNextTagItem(&tstate)))
    {
        switch (tag->ti_Tag)
        {
        case KMS_Free:
            *((IPTR *)tag->ti_Data) += mh->mh_Free;
            break;

        case KMS_Total:
            *((IPTR *)tag->ti_Data) += mh->mh_Upper - mh->mh_Lower;
            break;

        case KMS_LargestFree:
            largest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestFree:
            smallest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_LargestAlloc:
            largest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestAlloc:
            smallest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumAlloc:
            num_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumFree:
            num_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;
        }
    }

    if (do_traverse)
    {
        struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
        IPTR p;

        ObtainSemaphoreShared(&head->sem);

        for (p = 0; p < head->size; )
        {
            /* Get the total size and state of the current block */
            IPTR blksize = 0;
            page_t blkstate = P_STATUS(head->map[p]);

            do
            {
                UBYTE cnt = P_COUNT(head->map[p]);      /* Get (partial) block length */

                blksize += cnt;         /* Add length to total count */
                p       += cnt;         /* Advance past the block    */

                if (p == head->size)    /* Reached the end of this memory chunk? */
                    break;
                if (p > head->size)     /* Went past the chunk? This must never happen! */
                    Alert(AN_MemCorrupt);
            } while (P_STATUS(head->map[p]) == blkstate);

            blksize *= head->pageSize;  /* Convert to bytes */

            if (blkstate == P_ALLOC)
            {
                SET_LARGEST(largest_alloc, blksize);
                SET_SMALLEST(smallest_alloc, blksize);

                if (num_alloc)
                    *num_alloc += 1;
            }
            else
            {
                SET_LARGEST(largest_free, blksize);
                SET_SMALLEST(smallest_free, blksize);

                if (num_free)
                    *num_free += 1;
            }
        }

        ReleaseSemaphore(&head->sem);
    }
}
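
/*
 * Illustrative sketch (not part of the original source, hence disabled): querying block
 * statistics from a MemHeader through mm_StatMemHeader(). The result variables and the
 * terminating TAG_DONE entry are assumptions of the example; the KMS_* tags used are the
 * ones handled in the switch above.
 */
#if 0
static void mm_linear_example_stats(struct MemHeader *mh, struct KernelBase *KernelBase)
{
    IPTR largest_free = 0, smallest_free = 0;   /* 0 means 'not set yet' for the smallest tracker */

    struct TagItem query[] =
    {
        { KMS_LargestFree,  (IPTR)&largest_free  },
        { KMS_SmallestFree, (IPTR)&smallest_free },
        { TAG_DONE,         0                    }
    };

    mm_StatMemHeader(mh, query, KernelBase);

    D(bug("largest free block %lu bytes, smallest free block %lu bytes\n",
          largest_free, smallest_free));
}
#endif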