arch/all-pc/kernel/kernel_mmap.c
/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

/*
 * Functions for dealing with the Multiboot memory map.
 * This file overrides basic MemHeader creation functions in rom/kernel,
 * because if you have a memory map you don't need them.
 * This code builds a fully-functional set of MemHeaders and MemChunks
 * based on memory map contents and the physical breakout described in the array
 * of MemRegion structures.
 */
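
/*
 * Illustrative sketch (not part of this file's data): the kind of MemRegion
 * array an architecture port is expected to pass to mmap_InitMemory(). All
 * addresses, names, priorities and flags below are made-up placeholders;
 * designated initialisers are used so the sketch does not depend on the exact
 * field order declared in kernel_mmap.h.
 *
 *     static const struct MemRegion ExampleRegions[] =
 *     {
 *         { .start = 0x00000000, .end = 0x00100000,
 *           .name = "Example low memory",  .pri = -5, .flags = MEMF_PUBLIC | MEMF_KICK },
 *         { .start = 0x00100000, .end = 0x10000000,
 *           .name = "Example high memory", .pri =  0, .flags = MEMF_PUBLIC },
 *         { .name = NULL }    (a NULL name terminates the array)
 *     };
 */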

#include <aros/macros.h>
#include <aros/multiboot.h>
#include <exec/lists.h>
#include <exec/memory.h>
#include <exec/memheaderext.h>

#include "kernel_base.h"
#include "kernel_debug.h"
#include "kernel_mmap.h"

#include "memory.h"

#define D(x) x

/*
 * Append a single chunk to a MemHeader.
 * If MemHeader address is not set, a MemHeader will be created in this chunk
 * with the parameters specified in MemRegion structure.
 * Returns the last MemChunk in the chain, for linking.
 */
static struct MemChunk *krnAddMemChunk(struct MemHeader **mhPtr, struct MemChunk *prev, IPTR start, IPTR end,
                                       IPTR mh_Start, const struct MemRegion *reg)
{
    struct MemChunk *mc;

    if (*mhPtr == NULL)
    {
        /* Align start address - who knows... */
        start = AROS_ROUNDUP2(start, sizeof(IPTR));

        /* Ignore the chunk if it's too small to place the MemHeader there */
        if (start > end)
            return NULL;
        if (end - start < sizeof(struct MemHeader))
            return NULL;

        /* Create MemHeader if it is not there yet */
        *mhPtr = (struct MemHeader *)start;
        start += sizeof(struct MemHeader);

        (*mhPtr)->mh_Node.ln_Name = reg->name;
        (*mhPtr)->mh_Node.ln_Type = NT_MEMORY;
        (*mhPtr)->mh_Node.ln_Pri  = reg->pri;
        (*mhPtr)->mh_Attributes   = reg->flags;
        (*mhPtr)->mh_Lower        = (APTR)mh_Start;
        (*mhPtr)->mh_First        = NULL;    /* We don't actually have any single MemChunk yet */
        (*mhPtr)->mh_Free         = 0;

        /* The next MemChunk will be linked to our MemHeader */
        prev = (struct MemChunk *)&(*mhPtr)->mh_First;
    }

    (*mhPtr)->mh_Upper = (APTR)end;

    /* MemChunk must start and end on aligned addresses */
    start = AROS_ROUNDUP2(start, MEMCHUNK_TOTAL);
    end   = AROS_ROUNDDOWN2(end, MEMCHUNK_TOTAL);

    /* If there is not enough space, skip this chunk */
    if (start > end)
        return prev;
    if (end - start < MEMCHUNK_TOTAL)
        return prev;

    mc = (struct MemChunk *)start;
    mc->mc_Next  = NULL;
    mc->mc_Bytes = end - start;

    /* Append this chunk to a MemHeader */
    prev->mc_Next = mc;
    (*mhPtr)->mh_Free += mc->mc_Bytes;

    return mc;
}
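
/*
 * Note (illustrative): the returned chunk is intended to be fed back in as
 * 'prev' on the next call, so successive calls grow a single sorted free list
 * inside one MemHeader, roughly:
 *
 *     struct MemHeader *mh = NULL;
 *     struct MemChunk  *mc = NULL;
 *
 *     mc = krnAddMemChunk(&mh, mc, start1, end1, phys_start, reg);
 *     mc = krnAddMemChunk(&mh, mc, start2, end2, phys_start, reg);
 *
 * This is exactly the pattern mmap_InitMemory() follows below.
 */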

/*
 * Build conventional memory lists out of the Multiboot memory map structure.
 * Will add all MemHeaders to the specified list in the same order they
 * were created, not in priority order.
 * Memory breakup is specified by an array of MemRegion structures.
 *
 * The algorithm is the following:
 * 1. Traverse the MemRegion array. For each region repeat all of the following:
 * 2. Set the starting address (cur_start) to the beginning of the region.
 * 3. Traverse the entire memory map, locating the lowest fitting chunk.
 * 4. If we have found a chunk in (3), we add it to the memory list.
 * 5. If there's a gap between this chunk and the previously added one, we also start a new MemHeader.
 * 6. Set cur_start to the end of this chunk and repeat the process from step (3).
 *
 * This effectively sorts memory map entries in ascending order and merges adjacent chunks into single MemHeaders.
 */
void mmap_InitMemory(struct mb_mmap *mmap_addr, unsigned long mmap_len, struct MinList *memList,
                     IPTR klo, IPTR khi, IPTR reserve, const struct MemRegion *reg, ULONG allocator)
{
    struct MemHeader *mh;

    while (reg->name)
    {
        struct MemChunk *mc = NULL;
        IPTR phys_start = ~0;
        IPTR cur_start = reg->start;
        IPTR chunk_start;
        IPTR chunk_end;
        unsigned int chunk_type;

        mh = NULL;

        D(nbug("[Kernel:MMAP] Processing region 0x%p - 0x%p (%s)...\n", reg->start, reg->end, reg->name));

        do
        {
            struct mb_mmap *mmap = mmap_addr;
            unsigned long len = mmap_len;

            chunk_start = ~0;
            chunk_end   = 0;
            chunk_type  = 0;

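            /*
             * Scan the whole memory map and pick the lowest entry that still
             * overlaps [cur_start, reg->end). Entries are advanced by
             * 'mmap->size + 4' because, per the Multiboot specification, the
             * 'size' field of a map entry does not count the size field itself.
             */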
            while (len >= sizeof(struct mb_mmap))
            {
                IPTR start = mmap->addr;
                IPTR end = 0;

#ifdef __i386__
                /* We are on i386, ignore high memory */
                if (mmap->addr_high)
                {
                    /* Go to the next chunk */
                    len -= mmap->size + 4;
                    mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);

                    continue;
                }

                if (mmap->len_high)
                    end = 0x80000000;
                else
#endif
                end = mmap->addr + mmap->len;

                if ((cur_start < end) && (reg->end > start))
                {
                    if (cur_start > start)
                        start = cur_start;
                    if (reg->end < end)
                        end = reg->end;

                    if (start < chunk_start)
                    {
                        chunk_start = start;
                        chunk_end   = end;
                        chunk_type  = mmap->type;

                        if (chunk_start == cur_start)
                        {
                            /*
                             * Terminate search early if the found chunk is in the beginning of the region
                             * to consider. There will be no better match.
                             */
                            break;
                        }
                    }
                }

                /* Go to the next chunk */
                len -= mmap->size + 4;
                mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);
            }

            if (chunk_end)
            {
                /* We have a chunk to add. Either reserved or free. */

                if (mh && (chunk_start > cur_start))
                {
                    /*
                     * There is a physical gap in the memory. Add the current MemHeader to the list and
                     * reset the pointers in order to begin a new one.
                     */
                    D(nbug("[Kernel:MMAP] Physical gap 0x%p - 0x%p\n", cur_start, chunk_start));
                    D(nbug("[Kernel:MMAP] * mh @ 0x%p, region 0x%p - 0x%p\n", mh, mh->mh_Lower, mh->mh_Upper));

                    if (allocator == ALLOCATOR_TLSF)
                        mh = krnConvertMemHeaderToTLSF(mh);

                    ADDTAIL(memList, mh);
                    mh = NULL;
                    phys_start = ~0;
                }

                if (phys_start == ~0)
                    phys_start = chunk_start;

                if (chunk_type == MMAP_TYPE_RAM)
                {
                    /* Take reserved space into account */
                    if (reserve > chunk_start)
                        chunk_start = reserve;

                    D(nbug("[Kernel:MMAP] Usable chunk 0x%p - 0x%p\n", chunk_start, chunk_end));

                    /*
                     * Now let's add the chunk. However, this is the right place to remember about klo and khi.
                     * The area occupied by the kickstart must appear to be preallocated. This way our chunk can
                     * be split into up to three chunks, one of which will be occupied by the KS.
                     */
                    if ((klo >= chunk_end) || (khi <= chunk_start))
                    {
                        /* The kickstart is placed outside of this chunk, just add it as it is */
                        mc = krnAddMemChunk(&mh, mc, chunk_start, chunk_end, phys_start, reg);
                    }
                    else
                    {
                        /* Have some usable space below the kickstart ? */
                        if (klo > chunk_start)
                            mc = krnAddMemChunk(&mh, mc, chunk_start, klo, phys_start, reg);

                        /* Have some usable space above the kickstart ? */
                        if (khi < chunk_end)
                            mc = krnAddMemChunk(&mh, mc, khi, chunk_end, phys_start, reg);
                    }
                }
                else if (mh)
                {
                    /* Just expand the physical MemHeader area, but do not add the chunk as free */
                    D(nbug("[Kernel:MMAP] Reserved chunk 0x%p - 0x%p\n", chunk_start, chunk_end));

                    mh->mh_Upper = (APTR)chunk_end;
                }

                if (chunk_end == reg->end)
                {
                    /* Terminate early if we have reached the end of the region */
                    break;
                }

                cur_start = chunk_end;
            }

        } while (chunk_end);

        /* Add the last MemHeader if it exists */
        if (mh)
        {
            D(nbug("[Kernel:MMAP] * mh @ 0x%p, region 0x%p - 0x%p\n", mh, mh->mh_Lower, mh->mh_Upper));
            if (allocator == ALLOCATOR_TLSF)
                mh = krnConvertMemHeaderToTLSF(mh);

            ADDTAIL(memList, mh);
        }

        reg++;
    }
}
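
/*
 * Illustrative sketch of how this function is driven. The call site below is
 * hypothetical; the variable names, kickstart bounds and reserve size are
 * assumptions, not definitions made by this file:
 *
 *     struct MinList memList;
 *
 *     NEWLIST(&memList);
 *     mmap_InitMemory(mmap, mmap_len, &memList,
 *                     (IPTR)kernel_lowest, (IPTR)kernel_highest,
 *                     0x1000, PC_Memory, ALLOCATOR_TLSF);
 *
 * The resulting MemHeaders on memList can then be handed over to the system
 * memory list by the startup code.
 */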

struct mb_mmap *mmap_FindRegion(IPTR addr, struct mb_mmap *mmap, unsigned long len)
{
    while (len >= sizeof(struct mb_mmap))
    {
        IPTR end;

#ifdef __i386__
        /* We are on i386, ignore high memory */
        if (mmap->addr_high)
            return NULL;

        if (mmap->len_high)
            end = 0x80000000;
        else
#endif
        end = mmap->addr + mmap->len;

        /* Return the chunk pointer if it matches */
        if ((addr >= mmap->addr) && (addr < end))
            return mmap;

        /* Go to the next chunk */
        len -= mmap->size + 4;
        mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);
    }

    return NULL;
}

/* Validate the specified region via the memory map */
BOOL mmap_ValidateRegion(unsigned long addr, unsigned long len, struct mb_mmap *mmap, unsigned long mmap_len)
{
    /* Locate a memory region */
    struct mb_mmap *region = mmap_FindRegion(addr, mmap, mmap_len);

    /* If it exists, and is free for usage... */
    if (region && region->type == MMAP_TYPE_RAM)
    {
        IPTR end = region->addr + region->len;

        /* Make sure it covers the whole of our specified area */
        if (addr + len < end)
            return TRUE;
    }

    return FALSE;
}
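
/*
 * Example (illustrative): a boot-time caller could use this to check that a
 * buffer handed over by the bootstrap really lies in usable RAM before
 * touching it. 'buf' and 'buf_len' are hypothetical values, not defined here:
 *
 *     if (!mmap_ValidateRegion((unsigned long)buf, buf_len, mmap, mmap_len))
 *         return;    (refuse to use the buffer)
 */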

IPTR mmap_LargestAddress(struct mb_mmap *mmap, unsigned long len)
{
    IPTR top = 0;

    while (len >= sizeof(struct mb_mmap))
    {
        if (mmap->type == MMAP_TYPE_RAM)
        {
            D(bug("type %02x ", mmap->type));
            if (top < (mmap->addr + mmap->len))
                top = mmap->addr + mmap->len;
            D(bug("base %p end %p\n", mmap->addr, mmap->addr + mmap->len - 1));
        }

        /* Go to the next chunk */
        len -= mmap->size + 4;
        mmap = (struct mb_mmap *)(mmap->size + (IPTR)mmap + 4);
    }

    return top;
}
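
/*
 * Note (assumption): the value returned by mmap_LargestAddress() is the
 * highest RAM address reported by the bootloader. A hypothetical caller in the
 * architecture startup code could use it to decide how much address space its
 * initial page tables need to cover, e.g.
 *
 *     IPTR memtop = mmap_LargestAddress(mmap, mmap_len);
 *
 * The call site shown is illustrative; actual usage lives outside this file.
 */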