src/system/kernel/vm/VMKernelAddressSpace.cpp

/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "VMKernelAddressSpace.h"

#include <stdlib.h>

#include <KernelExport.h>

#include <heap.h>
#include <slab/Slab.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMArea.h>


//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif


static int
ld(size_t value)
{
	int index = -1;
	while (value > 0) {
		value >>= 1;
		index++;
	}

	return index;
}


/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
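	// Note: the middle comparison below also rejects spots where
	// alignedBase + (size - 1) wraps around the top of the address space;
	// e.g. on a 32-bit system alignedBase 0xfffff000 with size 0x2000
	// overflows to 0xfff, which the limit comparison alone would accept.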
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}


// #pragma mark - VMKernelAddressSpace


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space"),
	fAreaObjectCache(NULL),
	fRangesObjectCache(NULL)
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


status_t
VMKernelAddressSpace::InitObject()
{
	fAreaObjectCache = create_object_cache("kernel areas",
		sizeof(VMKernelArea), 0, NULL, NULL, NULL);
	if (fAreaObjectCache == NULL)
		return B_NO_MEMORY;

	fRangesObjectCache = create_object_cache("kernel address ranges",
		sizeof(Range), 0, NULL, NULL, NULL);
	if (fRangesObjectCache == NULL)
		return B_NO_MEMORY;

	// create the free lists
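	// Free list i holds free ranges whose size is in
	// [2^(PAGE_SHIFT + i), 2^(PAGE_SHIFT + i + 1)); with 4 KiB pages
	// (PAGE_SHIFT == 12), a 24 KiB free range (ld() == 14) goes to list 2,
	// for example.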
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

	Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
		Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}


inline VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


inline VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		fAreaObjectCache, allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	object_cache_delete(fAreaObjectCache, area);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();
	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(fRangesObjectCache, allocationFlags) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations, if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

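	// Each successful reservation takes a reference to the address space;
	// UnreserveAddressRange() and UnreserveAllAddressRanges() drop it again
	// via Put().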
	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations, if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("area_list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {
		if (range->type != Range::RANGE_AREA)
			continue;

		VMKernelArea* area = range->area;
		kprintf(" area %" B_PRId32 ": ", area->id);
		kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
		kprintf("size = %#" B_PRIxSIZE " ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = %#" B_PRIx32 "\n", area->protection);
	}
}


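// Both free list helpers take the size under which the range is (or is to be)
// filed explicitly, rather than reading range->size: a caller may already have
// adjusted range->size and still needs to remove the range from the list it
// was originally filed under (see _AllocateRange()).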
inline void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE(" VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


inline void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE(" VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE(" VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
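	// (FindClosest(base, true) returns the existing range with the greatest
	// base that does not exceed the new range's base -- compare LookupArea()
	// -- so the new range goes right after it, or at the head if there is
	// none.)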
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.Insert(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert into tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE(" VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from tree and range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE(" VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
		{
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;
		}

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE(" VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
				Range(address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			object_cache_delete(fRangesObjectCache, leftOverRange1,
				allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE(" VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}


VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE(" VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list that is
			// guaranteed to contain the size. Finding a free range is O(1),
			// unless there are constraints (min base address, alignment).
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);
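			// For example, with 4 KiB pages, size == 24 KiB gives
			// ld(0xbfff >> 12) == ld(11) == 3: every range in list 3 is at
			// least 32 KiB, while list 2 could still contain ranges smaller
			// than 24 KiB.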

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE(" B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE(" -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE(" -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}

		default:
			return NULL;
	}
}


void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE(" VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}


#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();
	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS