rom/kernel/mm_linear.c

/*
    Copyright © 2010-2011, The AROS Development Team. All rights reserved.
    $Id$

    Desc: Page-based memory allocator, linear algorithm.
    Lang: english
*/

#define __KERNEL_NOLIBBASE__

#include <exec/alerts.h>
#include <exec/execbase.h>
#include <proto/arossupport.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include <inttypes.h>

#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_mm.h>
#include "mm_linear.h"

#define D(x)

/*
 * 'Linear' memory page allocator implementation.
 * The goals of this implementation are simplicity and reduced memory overhead.
 *
 * It's a modified version of the exec.library allocator, which works with variable-length
 * blocks of pages. Instead of lists, it keeps the information about allocated/free pages in
 * a linear memory map, which is separated from the data itself. This makes it possible to
 * block all access to unallocated pages. When allocating blocks at arbitrary addresses, the
 * memory space is searched for the best matching block. MEMF_REVERSE can be used to specify
 * the search direction.
 */
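
/*
 * Illustration (not from the original source): every map entry combines a page state
 * (P_ALLOC or P_FREE) with a run counter, map[p] = count | state; the exact bit layout
 * is defined by P_STATUS()/P_COUNT() in mm_linear.h. The counter at page p gives the
 * (possibly partial) number of same-state pages starting at p. For example, a freshly
 * initialized region of 5 pages is described as:
 *
 *     map[0..4] = { F5, F4, F3, F2, F1 }   (Fn = P_FREE with counter n)
 *
 * so a walker standing at page 0 reads a count of 5 and can skip straight past the run.
 */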

/*
 * Utility function.
 * Change the state of a block of 'pages' pages starting at 'first' to 'state'.
 * Checks the blocks to the left and to the right of our block, merges/splits
 * blocks if necessary, and updates counters.
 */
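
/*
 * Worked example (illustrative only, using the Fn/An shorthand from the comment above):
 * take an 8-page region where pages 0-3 are allocated and pages 4-7 are free,
 *
 *     map = { A4, A3, A2, A1, F4, F3, F2, F1 }
 *
 * Calling SetBlockState(head, 2, 2, P_FREE) first picks up the counter of the free
 * neighbour at page 4, then rewrites pages 3..2 backwards, and finally renumbers the
 * shortened allocated run in front of it:
 *
 *     map = { A2, A1, F6, F5, F4, F3, F2, F1 }
 *
 * i.e. the freed pages have been merged with the following free run, while the preceding
 * allocated run keeps its state and its counters now describe only two pages.
 */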
static void SetBlockState(struct BlockHeader *head, IPTR first, IPTR pages, page_t state)
{
    /* Check state of block next to our region */
    IPTR p = first + pages;
    page_t cnt = 1;

    if (p > head->size)
        /* If we claim we want to access more pages than our BlockHeader has, this is bad */
        Alert(AN_BadFreeAddr);
    else if (p < head->size)
    {
        /*
         * If the block next to our region is in the same state as our
         * block will have, pick up initial counter value from it. Our block
         * will be merged with the next one.
         */
        if (P_STATUS(head->map[p]) == state)
        {
            cnt = P_COUNT(head->map[p]);
            INC_COUNT(cnt);
        }
    }

    /*
     * Set state of our block. We set state from last to the first page, and every
     * page adds 1 to the counter (until it hits the limit value).
     */
    do
    {
        head->map[--p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > first);
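
    /*
     * Note (added for clarity): INC_COUNT() saturates the counter, and mm_Init() stops
     * counting at 127, so a run longer than the counter limit is simply described by a
     * sequence of saturated entries. Walkers such as mm_Allocate() then cross such a run
     * in several hops of P_COUNT() pages each instead of a single one.
     */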

    /*
     * If our block starts at page 0, there's nothing to check before it.
     * We're done.
     */
    if (p == 0)
        return;

    /*
     * Preceding block can have either the same state as our new state,
     * or different state.
     * In both cases its counters need updating.
     * - If it has the same state, we keep current counter value, so blocks
     *   get merged.
     * - If it has different state, we restart counter from 1. This means
     *   we are splitting the block.
     */
    if (P_STATUS(head->map[p-1]) != state)
    {
        cnt = 1;
        state = P_STATUS(head->map[p-1]);
    }

    /*
     * Update space counter for the block. We keep going until the state changes
     * again. The block will keep its original state.
     */
    do
    {
        if (P_STATUS(head->map[--p]) != state)
            break;

        head->map[p] = cnt | state;
        INC_COUNT(cnt);
    } while (p > 0);
}

/* Allocate 'size' bytes from MemHeader mh */
APTR mm_Allocate(struct MemHeader *mh, IPTR size, ULONG flags)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    APTR addr = NULL;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR p;
    IPTR candidate, candidate_size;

    D(bug("[mm_Allocate] Request for %u bytes from BlockHeader %p, KernelBase 0x%p\n", size, head, KernelBase));

    /*
     * Safety checks.
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Pad up size and convert it to number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    if (KernelBase)
        ObtainSemaphore(&head->sem);

    /* Start looking up from page zero */
    p = 0;
    candidate = 0;
    candidate_size = -1;

    /*
     * Look up the best matching free block.
     * We walk through the whole memory map in order to identify free blocks
     * and get their sizes. We use the best-match criterion in order to avoid
     * excessive memory fragmentation.
     */
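    /*
     * For illustration (not from the original source): if the map currently contains
     * free runs of 5, 3 and 8 pages and 3 pages are requested, the 3-page run is chosen,
     * and since it is an exact match the search stops there. With MEMF_REVERSE the
     * comparison below uses <=, so among equally good candidates the one found last
     * (i.e. at the highest address) wins.
     */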
    do
    {
        IPTR start = p; /* Starting page of the block being examined */
        IPTR free = 0;  /* Count of free pages in the block */

        while (P_STATUS(head->map[p]) == P_FREE)
        {
            UBYTE cnt = P_COUNT(head->map[p]);  /* Get (partial) block length */

            free += cnt;                        /* Add length to total count */
            p += cnt;                           /* Advance past the block */

            if (p == head->size)                /* Reached end of this memory chunk ? */
                break;
            if (p > head->size)                 /* Went past the chunk? This must never happen! */
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Have %u free pages starting from %u\n", free, start));

        /* Does the block fit ? */
        if (free >= pages)
        {
            if (flags & MEMF_REVERSE)
            {
                /*
                 * If MEMF_REVERSE is set, we remember a new candidate if its size is less
                 * than or equal to the current one. This is effectively the same as a best-match
                 * lookup starting from the end of the region.
                 */
                if (free <= candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }
            else
            {
                /*
                 * If the found block is smaller than the
                 * previous candidate, remember it as a new candidate.
                 */
                if (free < candidate_size)
                {
                    D(bug("[mm_Allocate] Old candidate %u (size %d)\n", candidate, candidate_size));
                    candidate = start;
                    candidate_size = free;
                    D(bug("[mm_Allocate] New candidate %u (size %d)\n", candidate, candidate_size));
                }
            }

            /* If we found an exact match, we can't do better, so stop searching */
            if (free == pages)
            {
                D(bug("[mm_Allocate] Exact match\n"));
                break;
            }
        }

        /*
         * If we are at the end of the memory map, we have nothing
         * more to look at. We either already have a candidate,
         * or none.
         */
        if (p == head->size)
        {
            D(bug("[mm_Allocate] Reached end of chunk\n"));
            break;
        }

        D(bug("[mm_Allocate] Allocated block starts at %u\n", p));

        /* Skip past the end of the allocated block */
        while (P_STATUS(head->map[p]) == P_ALLOC)
        {
            p += P_COUNT(head->map[p]);

            if (p == head->size)
            {
                D(bug("[mm_Allocate] Reached end of chunk\n"));
                break;
            }
            if (p > head->size)
                Alert(AN_MemCorrupt);
        }

        D(bug("[mm_Allocate] Skipped up to page %u\n", p));

    } while (p < head->size);

    /* Found a block ? */
    if (candidate_size != -1)
    {
        /* Mark the block as allocated */
        D(bug("[mm_Allocate] Allocating %u pages starting from %u\n", pages, candidate));
        SetBlockState(head, candidate, pages, P_ALLOC);

        /* Calculate starting address of the first page */
        addr = head->start + candidate * head->pageSize;

        /* Update free memory counter */
        mh->mh_Free -= size;
    }

    if (KernelBase)
        ReleaseSemaphore(&head->sem);

    D(bug("[mm_Allocate] Allocated at address 0x%p\n", addr));

    return addr;
}

/* Allocate 'size' bytes starting at 'addr' from MemHeader mh */
APTR mm_AllocAbs(struct MemHeader *mh, void *addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    IPTR align = head->pageSize - 1;
    IPTR pages;
    IPTR start, p;
    void *ret = NULL;

    D(bug("[mm_AllocAbs] Request for %u bytes from BlockHeader %p\n", size, head));

    /*
     * Safety checks.
     * If mh_First is NULL, it's a ROM header. We can't allocate from it.
     */
    if (!head)
        return NULL;

    /*
     * If either mc_Next or mc_Bytes is not zero, this MemHeader is not
     * managed by us. We can't allocate from it.
     */
    if (head->mc.mc_Next || head->mc.mc_Bytes)
        return NULL;

    /* Align starting address */
    addr = (void *)((IPTR)addr & ~align);

    /* Requested address can hit our administrative area. We can't satisfy such a request */
    if (addr < head->start)
        return NULL;

    /* Pad up size and convert it to number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Get start page number */
    start = (addr - head->start) / head->pageSize;

    /* Check if we have enough free pages starting from the first one */
    p = start;
    while (P_STATUS(head->map[p]) == P_FREE)
    {
        p += P_COUNT(head->map[p]); /* Advance past the block */
        if (p >= start + pages)     /* Counted enough free pages? */
        {
            /* Allocate the block and exit */
            ret = addr;
            SetBlockState(head, start, pages, P_ALLOC);
            break;
        }

        if (p == head->size)        /* Reached end of this memory chunk? */
            break;
        if (p > head->size)         /* Went past the chunk? This must never happen! */
            Alert(AN_MemCorrupt);
    }

    ReleaseSemaphore(&head->sem);

    return ret;
}

/* Free 'size' bytes starting from address 'addr' in the MemHeader mh */
void mm_Free(struct MemHeader *mh, APTR addr, IPTR size)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
    /* Calculate number of the starting page within the region */
    IPTR first = (addr - head->start) / head->pageSize;
    IPTR align = head->pageSize - 1;
    IPTR pages;

    /* Pad up size and convert it to number of pages */
    size = (size + align) & ~align;
    pages = size / head->pageSize;

    ObtainSemaphore(&head->sem);

    /* Set block state to free */
    SetBlockState(head, first, pages, P_FREE);

    /* Update free memory counter */
    mh->mh_Free += size;

    ReleaseSemaphore(&head->sem);
}

/*
 * Initialize memory management in a given MemHeader.
 * This routine takes into account only mh_Lower and mh_Upper; the rest is
 * ignored.
 * TODO: Currently it's assumed that the MemHeader itself is placed at the beginning
 * of the region. In the future this may not be true.
 */
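
/*
 * Resulting layout (illustrative sketch, not from the original source). After mm_Init()
 * the managed region looks roughly like this:
 *
 *     mh_Lower                                                       mh_Upper
 *     | struct MemHeader | BlockHeader + allocations map | page-aligned pages ... |
 *                                                        ^ head->start
 *
 * The do/while loop below moves head->start up one page at a time until the space
 * between head->map and head->start is large enough to describe all remaining pages,
 * so the map itself never overlaps the pages it manages.
 */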
void mm_Init(struct MemHeader *mh, ULONG pageSize)
{
    struct BlockHeader *head;
    IPTR align;
    APTR end;
    IPTR memsize;
    IPTR mapsize;
    IPTR p;
    UBYTE free;

    /*
     * Currently we assume the struct MemHeader to be at the beginning
     * of our region.
     */
    head = (APTR)mh + MEMHEADER_TOTAL;
    align = pageSize - 1;

    /* Fill in the BlockHeader */
    head->mc.mc_Next = NULL;
    head->mc.mc_Bytes = 0;
    head->pageSize = pageSize;

    /*
     * Page-align the boundaries.
     * We intentionally make 'start' point to the previous page;
     * we'll jump to the next page later, in the loop.
     */
    head->start = (APTR)((IPTR)head->map & ~align);
    end = (APTR)(((IPTR)mh->mh_Upper + 1) & ~align);

    D(bug("[mm_Init] MemHeader 0x%p, BlockHeader 0x%p, usable 0x%p - 0x%p\n", mh, head, head->start, end));

    do
    {
        /* Skip one page. This reserves some space (one page or less) for allocations map. */
        head->start += pageSize;

        /* Calculate resulting map size */
        mapsize = (head->start - (APTR)head->map) / sizeof(ULONG);

        /* Calculate number of free bytes and pages */
        memsize = end - head->start;
        head->size = memsize / pageSize;

        /*
         * Repeat the operation if there's not enough memory for the allocations map.
         * This will take one more page from the area and use it for the map.
         */
    } while (mapsize < head->size);

    D(bug("[mm_Init] Got %u usable pages\n", head->size));

    /* Mark all pages as free */
    p = head->size;
    free = 1;
    do {
        head->map[--p] = free;
        if (free < 127)
            free++;
    } while (p > 0);

    /* Set BlockHeader pointer and free space counter */
    mh->mh_First = &head->mc;
    mh->mh_Free = memsize;
}

/*
 * Apply memory protection to the MemHeader.
 * This is a separate routine because the MMU itself requires some memory to place its
 * control structures, and they need to be allocated using an already working allocator.
 * Only once the MMU is up and running can we apply protection to our memory.
 */
void mm_Protect(struct MemHeader *mh, struct KernelBase *KernelBase)
{
    struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;

    InitSemaphore(&head->sem);

    /* TODO */
}

#define SET_LARGEST(ptr, val)   \
    if (ptr)                    \
    {                           \
        if (val > *ptr)         \
            *ptr = val;         \
    }

#define SET_SMALLEST(ptr, val)  \
    if (ptr)                    \
    {                           \
        if (*ptr)               \
        {                       \
            if (val < *ptr)     \
                *ptr = val;     \
        }                       \
        else                    \
            *ptr = val;         \
    }
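
/*
 * Note (added for clarity): SET_SMALLEST treats a current value of 0 as "nothing
 * recorded yet", so the first block examined always initializes the minimum; the
 * statistics variables are therefore presumably pre-initialized to 0 by the caller.
 */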

/* Get statistics from the specified MemHeader */
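/*
 * Minimal usage sketch (illustrative, not part of this file): a caller gathers
 * statistics by passing a TagItem list whose ti_Data fields point at IPTR variables,
 * pre-initialized by the caller:
 *
 *     IPTR free = 0, largest = 0;
 *     struct TagItem query[] =
 *     {
 *         { KMS_Free,        (IPTR)&free    },
 *         { KMS_LargestFree, (IPTR)&largest },
 *         { TAG_DONE,        0              }
 *     };
 *
 *     mm_StatMemHeader(mh, query, KernelBase);
 */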
void mm_StatMemHeader(struct MemHeader *mh, const struct TagItem *query, struct KernelBase *KernelBase)
{
    struct TagItem *tag, *tstate = (struct TagItem *)query;
    IPTR *largest_alloc  = NULL;
    IPTR *smallest_alloc = NULL;
    IPTR *largest_free   = NULL;
    IPTR *smallest_free  = NULL;
    IPTR *num_alloc      = NULL;
    IPTR *num_free       = NULL;
    BOOL do_traverse = FALSE;

    while ((tag = LibNextTagItem(&tstate)))
    {
        switch (tag->ti_Tag)
        {
        case KMS_Free:
            *((IPTR *)tag->ti_Data) += mh->mh_Free;
            break;

        case KMS_Total:
            *((IPTR *)tag->ti_Data) += mh->mh_Upper - mh->mh_Lower;
            break;

        case KMS_LargestFree:
            largest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestFree:
            smallest_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_LargestAlloc:
            largest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_SmallestAlloc:
            smallest_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumAlloc:
            num_alloc = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;

        case KMS_NumFree:
            num_free = (IPTR *)tag->ti_Data;
            do_traverse = TRUE;
            break;
        }
    }

    if (do_traverse)
    {
        struct BlockHeader *head = (struct BlockHeader *)mh->mh_First;
        IPTR p;

        ObtainSemaphoreShared(&head->sem);

        for (p = 0; p < head->size; )
        {
            /* Get total size and state of the current block */
            IPTR blksize = 0;
            page_t blkstate = P_STATUS(head->map[p]);

            do
            {
                UBYTE cnt = P_COUNT(head->map[p]);  /* Get (partial) block length */

                blksize += cnt;                     /* Add length to total count */
                p += cnt;                           /* Advance past the block */

                if (p == head->size)                /* Reached end of this memory chunk ? */
                    break;
                if (p > head->size)                 /* Went past the chunk? This must never happen! */
                    Alert(AN_MemCorrupt);
            } while (P_STATUS(head->map[p]) == blkstate);

            blksize *= head->pageSize;              /* Convert to bytes */

            if (blkstate == P_ALLOC)
            {
                SET_LARGEST(largest_alloc, blksize);
                SET_SMALLEST(smallest_alloc, blksize);

                if (num_alloc)
                    *num_alloc += 1;
            }
            else
            {
                SET_LARGEST(largest_free, blksize);
                SET_SMALLEST(smallest_free, blksize);

                if (num_free)
                    *num_free += 1;
            }
        }

        ReleaseSemaphore(&head->sem);
    }
}