/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.

    Desc: Functions for dealing with the Multiboot memory map.
    This file overrides basic MemHeader creation functions in rom/kernel,
    because if you have a memory map you don't need them.
    This code builds a fully-functional set of MemHeaders and MemChunks
    based on memory map contents and the physical breakout described in the
    array of MemRegion structures.
*/
15 #include <aros/macros.h>
16 #include <aros/multiboot.h>
17 #include <exec/lists.h>
18 #include <exec/memory.h>
19 #include <exec/memheaderext.h>
21 #include "kernel_base.h"
22 #include "kernel_debug.h"
23 #include "kernel_mmap.h"
30 * Append a single chunk to a MemHeader.
31 * If MemHeader address is not set, a MemHeader will be created in this chunk
32 * with the parameters specified in MemRegion structure.
33 * Returns the last MemChunk in the chain, for linking.
35 static struct MemChunk
*krnAddMemChunk(struct MemHeader
**mhPtr
, struct MemChunk
*prev
, IPTR start
, IPTR end
,
36 IPTR mh_Start
, const struct MemRegion
*reg
)
42 /* Align start address - who knows... */
43 start
= AROS_ROUNDUP2(start
, sizeof(IPTR
));
45 /* Ignore the chunk if it's too small to place the MemHeader there */
48 if (end
- start
< sizeof(struct MemHeader
))
51 /* Create MemHeader if it is not there yet */
52 *mhPtr
= (struct MemHeader
*)start
;
53 start
+= sizeof(struct MemHeader
);
55 (*mhPtr
)->mh_Node
.ln_Name
= reg
->name
;
56 (*mhPtr
)->mh_Node
.ln_Type
= NT_MEMORY
;
57 (*mhPtr
)->mh_Node
.ln_Pri
= reg
->pri
;
58 (*mhPtr
)->mh_Attributes
= reg
->flags
;
59 (*mhPtr
)->mh_Lower
= (APTR
)mh_Start
;
60 (*mhPtr
)->mh_First
= NULL
; /* We don't actually have any single MemChunk yet */
61 (*mhPtr
)->mh_Free
= 0;
63 /* The next MemChunk will be linked to our MemHeader */
64 prev
= (struct MemChunk
*)&(*mhPtr
)->mh_First
;
67 (*mhPtr
)->mh_Upper
= (APTR
)end
;
69 /* MemChunk must start and end on aligned addresses */
70 start
= AROS_ROUNDUP2(start
, MEMCHUNK_TOTAL
);
71 end
= AROS_ROUNDDOWN2(end
, MEMCHUNK_TOTAL
);
73 /* If there is not enough space, skip this chunk */
76 if (end
- start
< MEMCHUNK_TOTAL
)
79 mc
= (struct MemChunk
*)start
;
81 mc
->mc_Bytes
= end
- start
;
83 /* Append this chunk to a MemHeader */
85 (*mhPtr
)->mh_Free
+= mc
->mc_Bytes
;
/*
 * Build conventional memory lists out of a Multiboot memory map structure.
 * Will add all MemHeaders to the specified list in the same order they
 * were created, not in priority order.
 * Memory breakup is specified by an array of MemRegion structures.
 *
 * The algorithm is the following:
 * 1. Traverse the MemRegion array. For each region repeat all of the following:
 * 2. Set the starting address (cur_start) to the beginning of the region.
 * 3. Traverse the entire memory map, locating the lowest fitting chunk.
 * 4. If we have found a chunk in (3), we add it to the memory list.
 * 5. If there's a gap between this chunk and the previously added one, we also start a new MemHeader.
 * 6. Set cur_start to the end of this chunk and repeat the process from step (3).
 *
 * This effectively sorts memory map entries in ascending order and merges adjacent chunks into single MemHeaders.
 */
106 void mmap_InitMemory(struct mb_mmap
*mmap_addr
, unsigned long mmap_len
, struct MinList
*memList
,
107 IPTR klo
, IPTR khi
, IPTR reserve
, const struct MemRegion
*reg
, ULONG allocator
)
109 struct MemHeader
*mh
;
113 struct MemChunk
*mc
= NULL
;
114 IPTR phys_start
= ~0;
115 IPTR cur_start
= reg
->start
;
118 unsigned int chunk_type
;
122 D(nbug("[Kernel:MMAP] Processing region 0x%p - 0x%p (%s)...\n", reg
->start
, reg
->end
, reg
->name
));
126 struct mb_mmap
*mmap
= mmap_addr
;
127 unsigned long len
= mmap_len
;
133 while (len
>= sizeof(struct mb_mmap
))
135 IPTR start
= mmap
->addr
;
139 /* We are on i386, ignore high memory */
142 /* Go to the next chunk */
143 len
-= mmap
->size
+ 4;
144 mmap
= (struct mb_mmap
*)(mmap
->size
+ (IPTR
)mmap
+ 4);
153 end
= mmap
->addr
+ mmap
->len
;
155 if ((cur_start
< end
) && (reg
->end
> start
))
157 if (cur_start
> start
)
162 if (start
< chunk_start
)
166 chunk_type
= mmap
->type
;
168 if (chunk_start
== cur_start
)
171 * Terminate search early if the found chunk is in the beginning of the region
172 * to consider. There will be no better match.
179 /* Go to the next chunk */
180 len
-= mmap
->size
+ 4;
181 mmap
= (struct mb_mmap
*)(mmap
->size
+ (IPTR
)mmap
+ 4);
186 /* Have a chunk to add. Either reserved or free. */
188 if (mh
&& (chunk_start
> cur_start
))
191 * There is a physical gap in the memory. Add current MemHeader to the list and reset pointers
192 * in order to begin a new one.
194 D(nbug("[Kernel:MMAP] Physical gap 0x%p - 0x%p\n", cur_start
, chunk_start
));
195 D(nbug("[Kernel:MMAP] * mh @ 0x%p, region 0x%p - 0x%p\n", mh
, mh
->mh_Lower
, mh
->mh_Upper
));
197 if (allocator
== ALLOCATOR_TLSF
)
198 mh
= krnConvertMemHeaderToTLSF(mh
);
200 ADDTAIL(memList
, mh
);
205 if (phys_start
== ~0)
206 phys_start
= chunk_start
;
208 if (chunk_type
== MMAP_TYPE_RAM
)
210 /* Take reserved space into account */
211 if (reserve
> chunk_start
)
212 chunk_start
= reserve
;
214 D(nbug("[Kernel:MMAP] Usable chunk 0x%p - 0x%p\n", chunk_start
, chunk_end
));
217 * Now let's add the chunk. However, this is the right place to remember about klo and khi.
218 * Area occupied by kickstart must appear to be preallocated. This way our chunk can be
219 * split into up to three chunks, one of which will be occupied by the KS.
221 if ((klo
>= chunk_end
) || (khi
<= chunk_start
))
223 /* If the kickstart is placed outside of this region, just add it as it is */
224 mc
= krnAddMemChunk(&mh
, mc
, chunk_start
, chunk_end
, phys_start
, reg
);
228 /* Have some usable space above the kickstart ? */
229 if (klo
> chunk_start
)
230 mc
= krnAddMemChunk(&mh
, mc
, chunk_start
, klo
, phys_start
, reg
);
232 /* Have some usable space below the kickstart ? */
234 mc
= krnAddMemChunk(&mh
, mc
, khi
, chunk_end
, phys_start
, reg
);
239 /* Just expand physical MemHeader area, but do not add the chunk as free */
240 D(nbug("[Kernel:MMAP] Reserved chunk 0x%p - 0x%p\n", chunk_start
, chunk_end
));
242 mh
->mh_Upper
= (APTR
)chunk_end
;
245 if (chunk_end
== reg
->end
)
247 /* Terminate early if we have reached the end of region */
251 cur_start
= chunk_end
;
256 /* Add the last MemHeader if exists */
259 D(nbug("[Kernel:MMAP] * mh @ 0x%p, region 0x%p - 0x%p\n", mh
, mh
->mh_Lower
, mh
->mh_Upper
));
260 if (allocator
== ALLOCATOR_TLSF
)
261 mh
= krnConvertMemHeaderToTLSF(mh
);
263 ADDTAIL(memList
, mh
);
270 struct mb_mmap
*mmap_FindRegion(IPTR addr
, struct mb_mmap
*mmap
, unsigned long len
)
272 while (len
>= sizeof(struct mb_mmap
))
277 /* We are on i386, ignore high memory */
285 end
= mmap
->addr
+ mmap
->len
;
287 /* Returh chunk pointer if matches */
288 if ((addr
>= mmap
->addr
) && (addr
< end
))
291 /* Go to the next chunk */
292 len
-= mmap
->size
+ 4;
293 mmap
= (struct mb_mmap
*)(mmap
->size
+ (IPTR
)mmap
+ 4);
298 /* Validate the specified region via memory map */
299 BOOL
mmap_ValidateRegion(unsigned long addr
, unsigned long len
, struct mb_mmap
*mmap
, unsigned long mmap_len
)
301 /* Locate a memory region */
302 struct mb_mmap
*region
= mmap_FindRegion(addr
, mmap
, mmap_len
);
304 /* If it exists, and free for usage... */
305 if (region
&& region
->type
== MMAP_TYPE_RAM
)
307 IPTR end
= region
->addr
+ region
->len
;
309 /* Make sure it covers the whole our specified area */
310 if (addr
+ len
< end
)
317 IPTR
mmap_LargestAddress(struct mb_mmap
*mmap
, unsigned long len
)
321 while(len
>= sizeof(struct mb_mmap
))
323 if (mmap
->type
== MMAP_TYPE_RAM
)
325 D(bug("type %02x ", mmap
->type
));
326 if (top
< (mmap
->addr
+ mmap
->len
))
327 top
= mmap
->addr
+ mmap
->len
;
329 D(bug("base %p end %p\n", mmap
->addr
, mmap
->addr
+ mmap
->len
- 1));
332 /* Go to the next chunk */
333 len
-= mmap
->size
+ 4;
334 mmap
= (struct mb_mmap
*)(mmap
->size
+ (IPTR
)mmap
+ 4);