/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <string.h>

#include <algorithm>

#include <boot/stage2.h>
#include <boot/platform.h>


static const size_t kChunkSize = 16 * B_PAGE_SIZE;

kernel_args gKernelArgs;

static void* sFirstFree;
static void* sLast;
static size_t sFree = kChunkSize;


static status_t
add_kernel_args_range(void* start, size_t size)
{
	return insert_address_range(gKernelArgs.kernel_args_range,
		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
		(uint64)(addr_t)start, size);
}


// #pragma mark - addr_range utility functions


static void
remove_range_index(addr_range* ranges, uint32& numRanges, uint32 index)
{
	if (index + 1 == numRanges) {
		// remove the last range in the array
		numRanges--;
		return;
	}

	memmove(&ranges[index], &ranges[index + 1],
		sizeof(addr_range) * (numRanges - 1 - index));
	numRanges--;
}


/*!	Inserts the specified (start, size) pair (aka range) in the
	given addr_range array.
	It will extend existing ranges in order to have as few
	ranges in the array as possible.
	Returns B_OK on success, or B_ENTRY_NOT_FOUND if there was
	no free array entry available anymore.
*/
status_t
insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	uint64 start, uint64 size)
{
	uint32 numRanges = *_numRanges;

	start = ROUNDDOWN(start, B_PAGE_SIZE);
	size = ROUNDUP(size, B_PAGE_SIZE);
	uint64 end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}

		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}

		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			uint64 joinStart = ranges[j].start;
			uint64 joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}

		*_numRanges = numRanges;
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = start;
	ranges[numRanges].size = size;
	(*_numRanges)++;

	return B_OK;
}
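
// A minimal usage sketch (hypothetical values, assuming a 4 KiB B_PAGE_SIZE):
// two touching page-aligned ranges are coalesced into a single array entry:
//
//	addr_range ranges[4];
//	uint32 count = 0;
//	insert_address_range(ranges, &count, 4, 0x100000, 0x1000);
//	insert_address_range(ranges, &count, 4, 0x101000, 0x1000);
//	// count == 1, ranges[0] == { 0x100000, 0x2000 }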


status_t
remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	uint64 start, uint64 size)
{
	uint32 numRanges = *_numRanges;

	uint64 end = ROUNDUP(start + size, B_PAGE_SIZE);
	start = ROUNDDOWN(start, B_PAGE_SIZE);

	for (uint32 i = 0; i < numRanges; i++) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = rangeStart + ranges[i].size;

		if (start <= rangeStart) {
			if (end <= rangeStart) {
				// no intersection
			} else if (end >= rangeEnd) {
				// remove the complete range
				remove_range_index(ranges, numRanges, i);
				i--;
			} else {
				// remove the head of the range
				ranges[i].start = end;
				ranges[i].size = rangeEnd - end;
			}
		} else if (end >= rangeEnd) {
			if (start < rangeEnd) {
				// remove the tail of the range
				ranges[i].size = start - rangeStart;
			}	// else: no intersection
		} else {
			// rangeStart < start < end < rangeEnd
			// The ugly case: We have to remove something from the middle of
			// the range. We keep the head of the range and insert its tail
			// as a new range.
			ranges[i].size = start - rangeStart;
			return insert_address_range(ranges, _numRanges, maxRanges, end,
				rangeEnd - end);
		}
	}

	*_numRanges = numRanges;
	return B_OK;
}
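
// A sketch of the "ugly" middle case (hypothetical values): removing a page
// from the middle of a range keeps the head in place and re-inserts the tail
// as a second entry:
//
//	addr_range ranges[4];
//	uint32 count = 0;
//	insert_address_range(ranges, &count, 4, 0x100000, 0x4000);
//	remove_address_range(ranges, &count, 4, 0x101000, 0x1000);
//	// count == 2: { 0x100000, 0x1000 } and { 0x102000, 0x2000 }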


status_t
get_free_address_range(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size, uint64* _rangeBase)
{
	uint64 end = base + size - 1;
	if (end < base)
		return B_BAD_VALUE;

	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever our range
	// intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = ranges[i].start + ranges[i].size - 1;

		if (base <= rangeEnd && rangeStart <= end) {
			// intersection; move past the conflicting range and restart
			base = rangeEnd + 1;
			end = rangeEnd + size;
			if (base == 0 || end < base)
				return B_BAD_VALUE;

			i = 0;
			continue;
		}

		i++;
	}

	*_rangeBase = base;
	return B_OK;
}
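
// Illustration (hypothetical values): with one allocated range
// { 0x100000, 0x10000 }, a request for 0x1000 bytes at base 0x100000 gets
// bumped just past the conflict:
//
//	uint64 base;
//	if (get_free_address_range(ranges, count, 0x100000, 0x1000, &base)
//			== B_OK) {
//		// base == 0x110000
//	}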


bool
is_address_range_covered(addr_range* ranges, uint32 numRanges, uint64 base,
	uint64 size)
{
	// Note: We don't assume that the ranges are sorted, so we can't do this
	// in a simple loop. Instead we restart the loop whenever the start of the
	// given range intersects with an existing one.

	for (uint32 i = 0; i < numRanges;) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeSize = ranges[i].size;

		if (rangeStart <= base && rangeSize > base - rangeStart) {
			uint64 intersect = std::min(rangeStart + rangeSize - base, size);
			base += intersect;
			size -= intersect;
			if (size == 0)
				return true;

			i = 0;
			continue;
		}

		i++;
	}

	return false;
}
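
// Illustration (hypothetical values): adjacent ranges { 0x1000, 0x2000 } and
// { 0x3000, 0x1000 } together cover [0x1800, 0x3800), even though no single
// range contains that span:
//
//	is_address_range_covered(ranges, 2, 0x1800, 0x2000);	// true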


void
sort_address_ranges(addr_range* ranges, uint32 numRanges)
{
	// TODO: This is a pretty sucky bubble sort implementation!
	bool done;

	do {
		done = true;
		for (uint32 i = 1; i < numRanges; i++) {
			if (ranges[i].start < ranges[i - 1].start) {
				done = false;
				addr_range tempRange;
				memcpy(&tempRange, &ranges[i], sizeof(addr_range));
				memcpy(&ranges[i], &ranges[i - 1], sizeof(addr_range));
				memcpy(&ranges[i - 1], &tempRange, sizeof(addr_range));
			}
		}
	} while (!done);
}


// #pragma mark - kernel args range functions

status_t
insert_physical_memory_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_memory_range,
		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
		start, size);
}


status_t
insert_physical_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.physical_allocated_range,
		&gKernelArgs.num_physical_allocated_ranges,
		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
}


status_t
insert_virtual_allocated_range(uint64 start, uint64 size)
{
	return insert_address_range(gKernelArgs.virtual_allocated_range,
		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
		start, size);
}


#if B_HAIKU_PHYSICAL_BITS > 32

void
ignore_physical_memory_ranges_beyond_4gb()
{
	sort_address_ranges(gKernelArgs.physical_memory_range,
		gKernelArgs.num_physical_memory_ranges);

	static const uint64 kLimit = (uint64)1 << 32;

	// remove everything past 4 GB
	for (uint32 i = gKernelArgs.num_physical_memory_ranges; i > 0; i--) {
		addr_range& range = gKernelArgs.physical_memory_range[i - 1];
		if (range.start >= kLimit) {
			// the complete range is beyond the limit
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", range.start,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory += range.size;
			gKernelArgs.num_physical_memory_ranges = i - 1;
			continue;
		}

		if (kLimit - range.start < range.size) {
			// the range is partially beyond the limit
			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", kLimit,
				range.start + range.size);
			gKernelArgs.ignored_physical_memory
				+= range.size - (kLimit - range.start);
			range.size = kLimit - range.start;
		}

		break;
	}
}

#endif	// B_HAIKU_PHYSICAL_BITS > 32


// #pragma mark - kernel_args allocations

/*!	This function can be used to allocate memory that is going
	to be passed over to the kernel. For example, the preloaded_image
	structures are allocated this way.
	The boot loader heap doesn't make it into the kernel!
*/
void*
kernel_args_malloc(size_t size)
{
	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);

	if (sFirstFree != NULL && size <= sFree) {
		// there is enough space in the current buffer
		void* address = sFirstFree;
		sFirstFree = (void*)((addr_t)sFirstFree + size);
		sLast = address;
		sFree -= size;

		return address;
	}

	if (size > kChunkSize / 2 && sFree < size) {
		// the block is so large, we'll allocate a new block for it
		void* block = NULL;
		if (platform_allocate_region(&block, size,
				B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
			return NULL;
		}

#ifdef _BOOT_PLATFORM_EFI
		uint64 translated_block;
		platform_bootloader_address_to_kernel_address(block, &translated_block);
		if (add_kernel_args_range((void*)translated_block, size) != B_OK)
#else
		if (add_kernel_args_range(block, size) != B_OK)
#endif
			panic("kernel_args max range too low!\n");

		return block;
	}

	// just allocate a new block and "close" the old one
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize,
			B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
		return NULL;
	}

	sFirstFree = (void*)((addr_t)block + size);
	sLast = block;
	sFree = kChunkSize - size;
#ifdef _BOOT_PLATFORM_EFI
	uint64 translated_block;
	platform_bootloader_address_to_kernel_address(block, &translated_block);
	if (add_kernel_args_range((void*)translated_block, kChunkSize) != B_OK)
#else
	if (add_kernel_args_range(block, kChunkSize) != B_OK)
#endif
		panic("kernel_args max range too low!\n");

	return block;
}
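
// Typical use (a sketch): memory allocated here survives the hand-off to the
// kernel, e.g. for the preloaded_image structures mentioned above:
//
//	preloaded_image* image
//		= (preloaded_image*)kernel_args_malloc(sizeof(preloaded_image));
//	if (image == NULL)
//		panic("out of memory for kernel args");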


/*!	Convenience function that provides strdup() functionality for the
	kernel args heap.
*/
char*
kernel_args_strdup(const char* string)
{
	if (string == NULL || string[0] == '\0')
		return NULL;

	size_t length = strlen(string) + 1;

	char* target = (char*)kernel_args_malloc(length);
	if (target == NULL)
		return NULL;

	memcpy(target, string, length);
	return target;
}


/*!	This function frees a block allocated via kernel_args_malloc().
	It's very simple; it can only free the last allocation. It's
	enough for its current usage in the boot loader, though.
*/
void
kernel_args_free(void* block)
{
	if (sLast != block) {
		// we can only free the last allocation
		return;
	}

	// roll the bump pointer back and reclaim the last allocation's size
	// (in addition to whatever was already free in the current chunk)
	sFree += (addr_t)sFirstFree - (addr_t)sLast;
	sFirstFree = block;
	sLast = NULL;
}
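
// Note (sketch, hypothetical string value): only the most recent allocation
// can actually be reclaimed; freeing anything else is a silent no-op:
//
//	char* copy = kernel_args_strdup("vesa");
//	kernel_args_free(copy);	// ok: "copy" was the last allocation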