/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMKernelAddressSpace.h"

#include <KernelExport.h>

#include <slab/Slab.h>

#include <vm/VMArea.h>

//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

//#define PARANOIA_CHECKS
#ifdef PARANOIA_CHECKS
#	define PARANOIA_CHECK_STRUCTURES()	_CheckStructures()
#else
#	define PARANOIA_CHECK_STRUCTURES()	do {} while (false)
#endif

/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
	\param base Minimum base address valid for the area.
	\param alignedBase The base address of the range to check.
	\param size The size of the area.
	\param limit The last (inclusive) address of the range to check.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}
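
// Note that the "alignedBase + (size - 1) > alignedBase" term rejects spots
// whose end address would wrap around the top of the address space.
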

// #pragma mark - VMKernelAddressSpace


VMKernelAddressSpace::VMKernelAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "kernel address space"),
	fAreaObjectCache(NULL),
	fRangesObjectCache(NULL)
{
}


VMKernelAddressSpace::~VMKernelAddressSpace()
{
	panic("deleting the kernel aspace!\n");
}


status_t
VMKernelAddressSpace::InitObject()
{
	fAreaObjectCache = create_object_cache("kernel areas",
		sizeof(VMKernelArea), 0, NULL, NULL, NULL);
	if (fAreaObjectCache == NULL)
		return B_NO_MEMORY;

	fRangesObjectCache = create_object_cache("kernel address ranges",
		sizeof(Range), 0, NULL, NULL, NULL);
	if (fRangesObjectCache == NULL)
		return B_NO_MEMORY;

	// create the free lists
	size_t size = fEndAddress - fBase + 1;
	fFreeListCount = ld(size) - PAGE_SHIFT + 1;
	fFreeLists = new(std::nothrow) RangeFreeList[fFreeListCount];
	if (fFreeLists == NULL)
		return B_NO_MEMORY;

	// initially the whole address space is one free range
	Range* range = new(fRangesObjectCache, 0) Range(fBase, size,
		Range::RANGE_FREE);
	if (range == NULL)
		return B_NO_MEMORY;

	_InsertRange(range);

	TRACE("VMKernelAddressSpace::InitObject(): address range: %#" B_PRIxADDR
		" - %#" B_PRIxADDR ", free lists: %d\n", fBase, fEndAddress,
		fFreeListCount);

	return B_OK;
}
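
// Bookkeeping overview: every byte of the address space is covered by exactly
// one Range of type free, reserved, or area. The ranges are kept
// address-sorted in fRangeList, in fRangeTree (keyed by base address) for fast
// lookups, and -- for free ranges only -- in the fFreeLists bucket matching
// their power-of-two size class.
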
VMArea*
VMKernelAddressSpace::FirstArea() const
{
	Range* range = fRangeList.Head();
	while (range != NULL && range->type != Range::RANGE_AREA)
		range = fRangeList.GetNext(range);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::NextArea(VMArea* _area) const
{
	Range* range = static_cast<VMKernelArea*>(_area)->Range();
	do {
		range = fRangeList.GetNext(range);
	} while (range != NULL && range->type != Range::RANGE_AREA);
	return range != NULL ? range->area : NULL;
}


VMArea*
VMKernelAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMKernelArea::Create(this, name, wiring, protection,
		fAreaObjectCache, allocationFlags);
}


void
VMKernelAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::DeleteArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	object_cache_delete(fAreaObjectCache, area);
}


//! You must hold the address space's read lock.
VMArea*
VMKernelAddressSpace::LookupArea(addr_t address) const
{
	Range* range = fRangeTree.FindClosest(address, true);
	if (range == NULL || range->type != Range::RANGE_AREA)
		return NULL;

	VMKernelArea* area = range->area;
	return area->ContainsAddress(address) ? area : NULL;
}


/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMKernelAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::InsertArea(%p, %" B_PRIu32 ", %#" B_PRIxSIZE
		", %p \"%s\")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, _area, _area->name);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size,
		addressRestrictions->address_specification == B_EXACT_ADDRESS,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_AREA;
	range->area = area;
	area->SetRange(range);
	area->SetBase(range->base);
	area->SetSize(range->size);

	if (_address != NULL)
		*_address = (void*)area->Base();

	fFreeSpace -= area->Size();

	PARANOIA_CHECK_STRUCTURES();

	return B_OK;
}


//! You must hold the address space's write lock.
void
VMKernelAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::RemoveArea(%p)\n", _area);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);

	_FreeRange(area->Range(), allocationFlags);

	fFreeSpace += area->Size();

	PARANOIA_CHECK_STRUCTURES();
}


bool
VMKernelAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	Range* range = static_cast<VMKernelArea*>(area)->Range();

	if (newSize <= range->size)
		return true;

	Range* nextRange = fRangeList.GetNext(range);
	if (nextRange == NULL || nextRange->type == Range::RANGE_AREA)
		return false;

	if (nextRange->type == Range::RANGE_RESERVED
		&& nextRange->reserved.base > range->base) {
		return false;
	}

	// TODO: If there is free space after a reserved range (or vice versa), it
	// could be used as well.
	return newSize - range->size <= nextRange->size;
}


status_t
VMKernelAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ResizeArea(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	Range* nextRange = fRangeList.GetNext(range);

	if (newSize < range->size) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// a free range is following -- just enlarge it
			_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size += range->size - newSize;
			nextRange->base = range->base + newSize;
			_FreeListInsertRange(nextRange, nextRange->size);
		} else {
			// no free range following -- we need to allocate a new one and
			// insert it
			nextRange = new(fRangesObjectCache, allocationFlags) Range(
				range->base + newSize, range->size - newSize,
				Range::RANGE_FREE);
			if (nextRange == NULL)
				return B_NO_MEMORY;
			_InsertRange(nextRange);
		}
	} else {
		if (nextRange == NULL
			|| (nextRange->type == Range::RANGE_RESERVED
				&& nextRange->reserved.base > range->base)) {
			return B_BAD_VALUE;
		}
		// TODO: If there is free space after a reserved range (or vice versa),
		// it could be used as well.
		size_t sizeDiff = newSize - range->size;
		if (sizeDiff > nextRange->size)
			return B_BAD_VALUE;

		if (sizeDiff == nextRange->size) {
			// The next range is completely covered -- remove and delete it.
			_RemoveRange(nextRange);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		} else {
			// The next range is only partially covered -- shrink it.
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListRemoveRange(nextRange, nextRange->size);
			nextRange->size -= sizeDiff;
			nextRange->base = range->base + newSize;
			if (nextRange->type == Range::RANGE_FREE)
				_FreeListInsertRange(nextRange, nextRange->size);
		}
	}

	range->size = newSize;
	area->SetSize(newSize);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaHead(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::ShrinkAreaHead(%p, %#" B_PRIxSIZE ")\n", _area,
		newSize);

	VMKernelArea* area = static_cast<VMKernelArea*>(_area);
	Range* range = area->Range();

	if (newSize == range->size)
		return B_OK;

	if (newSize > range->size)
		return B_BAD_VALUE;

	Range* previousRange = fRangeList.GetPrevious(range);

	size_t sizeDiff = range->size - newSize;
	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		// the previous range is free -- just enlarge it
		_FreeListRemoveRange(previousRange, previousRange->size);
		previousRange->size += sizeDiff;
		_FreeListInsertRange(previousRange, previousRange->size);
		range->base += sizeDiff;
		range->size = newSize;
	} else {
		// no free range before -- we need to allocate a new one and
		// insert it
		previousRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, sizeDiff, Range::RANGE_FREE);
		if (previousRange == NULL)
			return B_NO_MEMORY;
		range->base += sizeDiff;
		range->size = newSize;
		_InsertRange(previousRange);
	}

	area->SetBase(range->base);
	area->SetSize(range->size);

	IncrementChangeCount();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::ShrinkAreaTail(VMArea* area, size_t newSize,
	uint32 allocationFlags)
{
	return ResizeArea(area, newSize, allocationFlags);
}


status_t
VMKernelAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	TRACE("VMKernelAddressSpace::ReserveAddressRange(%p, %" B_PRIu32 ", %#"
		B_PRIxSIZE ", %#" B_PRIx32 ")\n", addressRestrictions->address,
		addressRestrictions->address_specification, size, flags);

	// Don't allow range reservations, if the address space is about to be
	// deleted.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	Range* range;
	status_t error = _AllocateRange(addressRestrictions, size, false,
		allocationFlags, range);
	if (error != B_OK)
		return error;

	range->type = Range::RANGE_RESERVED;
	range->reserved.base = range->base;
	range->reserved.flags = flags;

	if (_address != NULL)
		*_address = (void*)range->base;

	Get();
	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


status_t
VMKernelAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	TRACE("VMKernelAddressSpace::UnreserveAddressRange(%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", address, size);

	// Don't allow range unreservations, if the address space is about to be
	// deleted. UnreserveAllAddressRanges() must be used.
	if (fDeleting)
		return B_BAD_TEAM_ID;

	// search range list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	Range* range = fRangeTree.FindClosest(address, false);
	while (range != NULL && range->base + (range->size - 1) <= endAddress) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
	return B_OK;
}


void
VMKernelAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	Range* range = fRangeList.Head();
	while (range != NULL) {
		// Get the next range for the iteration -- we need to skip free ranges,
		// since _FreeRange() might join them with the current range and delete
		// them.
		Range* nextRange = fRangeList.GetNext(range);
		while (nextRange != NULL && nextRange->type == Range::RANGE_FREE)
			nextRange = fRangeList.GetNext(nextRange);

		if (range->type == Range::RANGE_RESERVED) {
			_FreeRange(range, allocationFlags);
			Put();
		}

		range = nextRange;
	}

	PARANOIA_CHECK_STRUCTURES();
}


void
VMKernelAddressSpace::Dump() const
{
	VMAddressSpace::Dump();

	kprintf("area_list:\n");

	for (RangeList::ConstIterator it = fRangeList.GetIterator();
			Range* range = it.Next();) {
		if (range->type != Range::RANGE_AREA)
			continue;

		VMKernelArea* area = range->area;
		kprintf(" area %" B_PRId32 ": ", area->id);
		kprintf("base_addr = %#" B_PRIxADDR " ", area->Base());
		kprintf("size = %#" B_PRIxSIZE " ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = %#" B_PRIx32 "\n", area->protection);
	}
}


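// The free lists are indexed by size class: a free range of size s (always a
// multiple of B_PAGE_SIZE) lives in list ld(s) - PAGE_SHIFT, i.e. list i holds
// ranges of 2^i up to 2^(i + 1) - 1 pages. For example, with 4 KiB pages a
// 24 KiB (6 page) free range belongs to list 2, which covers 4 to 7 pages.

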
void
VMKernelAddressSpace::_FreeListInsertRange(Range* range, size_t size)
{
	TRACE("  VMKernelAddressSpace::_FreeListInsertRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Add(range);
}


void
VMKernelAddressSpace::_FreeListRemoveRange(Range* range, size_t size)
{
	TRACE("  VMKernelAddressSpace::_FreeListRemoveRange(%p (%#" B_PRIxADDR
		", %#" B_PRIxSIZE ", %d), %#" B_PRIxSIZE " (%d))\n", range, range->base,
		range->size, range->type, size, ld(size) - PAGE_SHIFT);

	fFreeLists[ld(size) - PAGE_SHIFT].Remove(range);
}


void
VMKernelAddressSpace::_InsertRange(Range* range)
{
	TRACE("  VMKernelAddressSpace::_InsertRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// insert at the correct position in the range list
	Range* insertBeforeRange = fRangeTree.FindClosest(range->base, true);
	fRangeList.Insert(
		insertBeforeRange != NULL
			? fRangeList.GetNext(insertBeforeRange) : fRangeList.Head(),
		range);

	// insert in the tree
	fRangeTree.Insert(range);

	// insert in the free ranges list, if the range is free
	if (range->type == Range::RANGE_FREE)
		_FreeListInsertRange(range, range->size);
}


void
VMKernelAddressSpace::_RemoveRange(Range* range)
{
	TRACE("  VMKernelAddressSpace::_RemoveRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// remove from tree and range list
	fRangeTree.Remove(range);
	fRangeList.Remove(range);

	// if it is a free range, also remove it from the free list
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, range->size);
}


status_t
VMKernelAddressSpace::_AllocateRange(
	const virtual_address_restrictions* addressRestrictions,
	size_t size, bool allowReservedRange, uint32 allocationFlags,
	Range*& _range)
{
	TRACE("  VMKernelAddressSpace::_AllocateRange(address: %p, size: %#"
		B_PRIxSIZE ", addressSpec: %#" B_PRIx32 ", reserved allowed: %d)\n",
		addressRestrictions->address, size,
		addressRestrictions->address_specification, allowReservedRange);

	// prepare size, alignment and the base address for the range search
	addr_t address = (addr_t)addressRestrictions->address;
	size = ROUNDUP(size, B_PAGE_SIZE);
	size_t alignment = addressRestrictions->alignment != 0
		? addressRestrictions->alignment : B_PAGE_SIZE;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
			if (address % B_PAGE_SIZE != 0)
				return B_BAD_VALUE;
			break;

		case B_BASE_ADDRESS:
			address = ROUNDUP(address, B_PAGE_SIZE);
			break;

		case B_ANY_KERNEL_BLOCK_ADDRESS:
			// align the memory to the next power of two of the size
			while (alignment < size)
				alignment <<= 1;

			// fall through...

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
			address = fBase;
			// TODO: remove this again when vm86 mode is moved into the kernel
			// completely (currently needs a userland address space!)
			if (address == USER_BASE)
				address = USER_BASE_ANY;
			break;

		default:
			return B_BAD_VALUE;
	}

	// find a range
	Range* range = _FindFreeRange(address, size, alignment,
		addressRestrictions->address_specification, allowReservedRange,
		address);
	if (range == NULL) {
		return addressRestrictions->address_specification == B_EXACT_ADDRESS
			? B_BAD_VALUE : B_NO_MEMORY;
	}

	TRACE("  VMKernelAddressSpace::_AllocateRange() found range:(%p (%#"
		B_PRIxADDR ", %#" B_PRIxSIZE ", %d)\n", range, range->base, range->size,
		range->type);

	// We have found a range. It might not be a perfect fit, in which case
	// we have to split the range.
	size_t rangeSize = range->size;

	if (address == range->base) {
		// allocation at the beginning of the range
		if (range->size > size) {
			// only partial -- split the range
			Range* leftOverRange = new(fRangesObjectCache, allocationFlags)
				Range(address + size, range->size - size, range);
			if (leftOverRange == NULL)
				return B_NO_MEMORY;

			range->size = size;
			_InsertRange(leftOverRange);
		}
	} else if (address + size == range->base + range->size) {
		// allocation at the end of the range -- split the range
		Range* leftOverRange = new(fRangesObjectCache, allocationFlags) Range(
			range->base, range->size - size, range);
		if (leftOverRange == NULL)
			return B_NO_MEMORY;

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange);
	} else {
		// allocation in the middle of the range -- split the range in three
		Range* leftOverRange1 = new(fRangesObjectCache, allocationFlags) Range(
			range->base, address - range->base, range);
		if (leftOverRange1 == NULL)
			return B_NO_MEMORY;
		Range* leftOverRange2 = new(fRangesObjectCache, allocationFlags) Range(
			address + size, range->size - size - leftOverRange1->size, range);
		if (leftOverRange2 == NULL) {
			object_cache_delete(fRangesObjectCache, leftOverRange1,
				allocationFlags);
			return B_NO_MEMORY;
		}

		range->base = address;
		range->size = size;
		_InsertRange(leftOverRange1);
		_InsertRange(leftOverRange2);
	}

	// If the range is a free range, remove it from the respective free list.
	if (range->type == Range::RANGE_FREE)
		_FreeListRemoveRange(range, rangeSize);

	IncrementChangeCount();

	TRACE("  VMKernelAddressSpace::_AllocateRange() -> %p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ")\n", range, range->base, range->size);

	_range = range;
	return B_OK;
}
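
// Worked example for the middle-of-range case above: allocating size bytes at
// address out of a free range [base, base + rangeSize) reuses the original
// Range object for the allocation itself ([address, address + size)) and
// inserts two new ranges for the leftovers, [base, address) and
// [address + size, base + rangeSize).

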
VMKernelAddressSpace::Range*
VMKernelAddressSpace::_FindFreeRange(addr_t start, size_t size,
	size_t alignment, uint32 addressSpec, bool allowReservedRange,
	addr_t& _foundAddress)
{
	TRACE("  VMKernelAddressSpace::_FindFreeRange(start: %#" B_PRIxADDR
		", size: %#" B_PRIxSIZE ", alignment: %#" B_PRIxSIZE ", addressSpec: %#"
		B_PRIx32 ", reserved allowed: %d)\n", start, size, alignment,
		addressSpec, allowReservedRange);

	switch (addressSpec) {
		case B_BASE_ADDRESS:
		{
			// We have to iterate through the range list starting at the given
			// address. This is the most inefficient case.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_FREE) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			// We didn't find a free spot in the requested range, so we'll
			// try again without any restrictions.
			start = fBase;
			addressSpec = B_ANY_ADDRESS;
			// fall through...
		}

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		{
			// We want to allocate from the first non-empty free list that is
			// guaranteed to contain the size. Finding a free range is O(1),
			// unless there are constraints (min base address, alignment).
			int freeListIndex = ld((size * 2 - 1) >> PAGE_SHIFT);

			for (int32 i = freeListIndex; i < fFreeListCount; i++) {
				RangeFreeList& freeList = fFreeLists[i];
				if (freeList.IsEmpty())
					continue;

				for (RangeFreeList::Iterator it = freeList.GetIterator();
						Range* range = it.Next();) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						_foundAddress = alignedBase;
						return range;
					}
				}
			}

			if (!allowReservedRange)
				return NULL;

			// We haven't found any free ranges, but we're supposed to look
			// for reserved ones, too. Iterate through the range list starting
			// at the given address.
			Range* range = fRangeTree.FindClosest(start, true);
			while (range != NULL) {
				if (range->type == Range::RANGE_RESERVED) {
					addr_t alignedBase = ROUNDUP(range->base, alignment);
					if (is_valid_spot(start, alignedBase, size,
							range->base + (range->size - 1))) {
						// allocation from the back might be preferred
						// -- adjust the base accordingly
						if ((range->reserved.flags & RESERVED_AVOID_BASE)
								!= 0) {
							alignedBase = ROUNDDOWN(
								range->base + (range->size - size), alignment);
						}

						_foundAddress = alignedBase;
						return range;
					}
				}
				range = fRangeList.GetNext(range);
			}

			return NULL;
		}

		case B_EXACT_ADDRESS:
		{
			Range* range = fRangeTree.FindClosest(start, true);
			TRACE("    B_EXACT_ADDRESS: range: %p\n", range);
			if (range == NULL || range->type == Range::RANGE_AREA
				|| range->base + (range->size - 1) < start + (size - 1)) {
				// TODO: Support allocating if the area range covers multiple
				// free and reserved ranges!
				TRACE("    -> no suitable range\n");
				return NULL;
			}

			if (range->type != Range::RANGE_FREE && !allowReservedRange) {
				TRACE("    -> reserved range not allowed\n");
				return NULL;
			}

			_foundAddress = start;
			return range;
		}
	}

	return NULL;
}


void
VMKernelAddressSpace::_FreeRange(Range* range, uint32 allocationFlags)
{
	TRACE("  VMKernelAddressSpace::_FreeRange(%p (%#" B_PRIxADDR ", %#"
		B_PRIxSIZE ", %d))\n", range, range->base, range->size, range->type);

	// Check whether one or both of the neighboring ranges are free already,
	// and join them, if so.
	Range* previousRange = fRangeList.GetPrevious(range);
	Range* nextRange = fRangeList.GetNext(range);

	if (previousRange != NULL && previousRange->type == Range::RANGE_FREE) {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join them all -- keep the first one, delete the others
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			_RemoveRange(nextRange);
			previousRange->size += range->size + nextRange->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		} else {
			// join with the previous range only, delete the supplied one
			_FreeListRemoveRange(previousRange, previousRange->size);
			_RemoveRange(range);
			previousRange->size += range->size;
			object_cache_delete(fRangesObjectCache, range, allocationFlags);
			_FreeListInsertRange(previousRange, previousRange->size);
		}
	} else {
		if (nextRange != NULL && nextRange->type == Range::RANGE_FREE) {
			// join with the next range and delete it
			_RemoveRange(nextRange);
			range->size += nextRange->size;
			object_cache_delete(fRangesObjectCache, nextRange, allocationFlags);
		}

		// mark the range free and add it to the respective free list
		range->type = Range::RANGE_FREE;
		_FreeListInsertRange(range, range->size);
	}

	IncrementChangeCount();
}
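
// Since _FreeRange() always merges with free neighbors, the structures never
// contain two adjacent free ranges -- an invariant _CheckStructures() below
// verifies.

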
#ifdef PARANOIA_CHECKS

void
VMKernelAddressSpace::_CheckStructures() const
{
	// general tree structure check
	fRangeTree.CheckTree();

	// check range list and tree
	size_t spaceSize = fEndAddress - fBase + 1;
	addr_t nextBase = fBase;
	Range* previousRange = NULL;
	int previousRangeType = Range::RANGE_AREA;
	uint64 freeRanges = 0;

	RangeList::ConstIterator listIt = fRangeList.GetIterator();
	RangeTree::ConstIterator treeIt = fRangeTree.GetIterator();

	while (true) {
		Range* range = listIt.Next();
		Range* treeRange = treeIt.Next();
		if (range != treeRange) {
			panic("VMKernelAddressSpace::_CheckStructures(): list/tree range "
				"mismatch: %p vs %p", range, treeRange);
		}
		if (range == NULL)
			break;

		if (range->base != nextBase) {
			panic("VMKernelAddressSpace::_CheckStructures(): range base %#"
				B_PRIxADDR ", expected: %#" B_PRIxADDR, range->base, nextBase);
		}

		if (range->size == 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): empty range %p",
				range);
		}

		if (range->size % B_PAGE_SIZE != 0) {
			panic("VMKernelAddressSpace::_CheckStructures(): range %p (%#"
				B_PRIxADDR ", %#" B_PRIxSIZE ") not page aligned", range,
				range->base, range->size);
		}

		if (range->size > spaceSize - (range->base - fBase)) {
			panic("VMKernelAddressSpace::_CheckStructures(): range too large: "
				"(%#" B_PRIxADDR ", %#" B_PRIxSIZE "), address space end: %#"
				B_PRIxADDR, range->base, range->size, fEndAddress);
		}

		if (range->type == Range::RANGE_FREE) {
			freeRanges++;

			if (previousRangeType == Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): adjoining "
					"free ranges: %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE
					"), %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ")", previousRange,
					previousRange->base, previousRange->size, range,
					range->base, range->size);
			}
		}

		previousRange = range;
		nextBase = range->base + range->size;
		previousRangeType = range->type;
	}

	if (nextBase - 1 != fEndAddress) {
		panic("VMKernelAddressSpace::_CheckStructures(): space not fully "
			"covered by ranges: last: %#" B_PRIxADDR ", expected %#" B_PRIxADDR,
			nextBase - 1, fEndAddress);
	}

	// check the free lists
	uint64 freeListRanges = 0;
	for (int i = 0; i < fFreeListCount; i++) {
		RangeFreeList& freeList = fFreeLists[i];
		if (freeList.IsEmpty())
			continue;

		for (RangeFreeList::Iterator it = freeList.GetIterator();
				Range* range = it.Next();) {
			if (range->type != Range::RANGE_FREE) {
				panic("VMKernelAddressSpace::_CheckStructures(): non-free "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (fRangeTree.Find(range->base) != range) {
				panic("VMKernelAddressSpace::_CheckStructures(): unknown "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"free list %d", range, range->base, range->size,
					range->type, i);
			}

			if (ld(range->size) - PAGE_SHIFT != i) {
				panic("VMKernelAddressSpace::_CheckStructures(): "
					"range %p (%#" B_PRIxADDR ", %#" B_PRIxSIZE ", %d) in "
					"wrong free list %d", range, range->base, range->size,
					range->type, i);
			}

			freeListRanges++;
		}
	}
}

#endif	// PARANOIA_CHECKS