 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
11 #include "VMUserAddressSpace.h"
17 #include <KernelExport.h>
21 #include <util/atomic.h>
22 #include <util/Random.h>
24 #include <vm/VMArea.h>
29 # define TRACE(x) dprintf x
36 const addr_t
VMUserAddressSpace::kMaxRandomize
= 0x8000000000ul
;
37 const addr_t
VMUserAddressSpace::kMaxInitialRandomize
= 0x20000000000ul
;
39 const addr_t
VMUserAddressSpace::kMaxRandomize
= 0x800000ul
;
40 const addr_t
VMUserAddressSpace::kMaxInitialRandomize
= 0x2000000ul
;
44 /*! Verifies that an area with the given aligned base and size fits into
45 the spot defined by base and limit and checks for overflows.
48 is_valid_spot(addr_t base
, addr_t alignedBase
, addr_t size
, addr_t limit
)
50 return (alignedBase
>= base
&& alignedBase
+ (size
- 1) > alignedBase
51 && alignedBase
+ (size
- 1) <= limit
);
56 is_base_address_spec(uint32 addressSpec
)
58 return addressSpec
== B_BASE_ADDRESS
59 || addressSpec
== B_RANDOMIZED_BASE_ADDRESS
;
64 align_address(addr_t address
, size_t alignment
)
66 return ROUNDUP(address
, alignment
);
71 align_address(addr_t address
, size_t alignment
, uint32 addressSpec
,
74 if (is_base_address_spec(addressSpec
))
75 address
= std::max(address
, baseAddress
);
76 return align_address(address
, alignment
);
// #pragma mark - VMUserAddressSpace
83 VMUserAddressSpace::VMUserAddressSpace(team_id id
, addr_t base
, size_t size
)
85 VMAddressSpace(id
, base
, size
, "address space"),
91 VMUserAddressSpace::~VMUserAddressSpace()
97 VMUserAddressSpace::FirstArea() const
99 VMUserArea
* area
= fAreas
.Head();
100 while (area
!= NULL
&& area
->id
== RESERVED_AREA_ID
)
101 area
= fAreas
.GetNext(area
);
107 VMUserAddressSpace::NextArea(VMArea
* _area
) const
109 VMUserArea
* area
= static_cast<VMUserArea
*>(_area
);
110 area
= fAreas
.GetNext(area
);
111 while (area
!= NULL
&& area
->id
== RESERVED_AREA_ID
)
112 area
= fAreas
.GetNext(area
);
118 VMUserAddressSpace::CreateArea(const char* name
, uint32 wiring
,
119 uint32 protection
, uint32 allocationFlags
)
121 return VMUserArea::Create(this, name
, wiring
, protection
, allocationFlags
);
126 VMUserAddressSpace::DeleteArea(VMArea
* _area
, uint32 allocationFlags
)
128 VMUserArea
* area
= static_cast<VMUserArea
*>(_area
);
130 free_etc(area
, allocationFlags
);
134 //! You must hold the address space's read lock.
136 VMUserAddressSpace::LookupArea(addr_t address
) const
138 // check the area hint first
139 VMArea
* areaHint
= atomic_pointer_get(&fAreaHint
);
140 if (areaHint
!= NULL
&& areaHint
->ContainsAddress(address
))
143 for (VMUserAreaList::ConstIterator it
= fAreas
.GetIterator();
144 VMUserArea
* area
= it
.Next();) {
145 if (area
->id
== RESERVED_AREA_ID
)
148 if (area
->ContainsAddress(address
)) {
149 atomic_pointer_set(&fAreaHint
, area
);
158 /*! This inserts the area you pass into the address space.
159 It will also set the "_address" argument to its base address when
161 You need to hold the VMAddressSpace write lock.
164 VMUserAddressSpace::InsertArea(VMArea
* _area
, size_t size
,
165 const virtual_address_restrictions
* addressRestrictions
,
166 uint32 allocationFlags
, void** _address
)
168 VMUserArea
* area
= static_cast<VMUserArea
*>(_area
);
170 addr_t searchBase
, searchEnd
;
173 switch (addressRestrictions
->address_specification
) {
174 case B_EXACT_ADDRESS
:
175 searchBase
= (addr_t
)addressRestrictions
->address
;
176 searchEnd
= (addr_t
)addressRestrictions
->address
+ (size
- 1);
180 case B_RANDOMIZED_BASE_ADDRESS
:
181 searchBase
= (addr_t
)addressRestrictions
->address
;
182 searchEnd
= fEndAddress
;
186 case B_ANY_KERNEL_ADDRESS
:
187 case B_ANY_KERNEL_BLOCK_ADDRESS
:
188 case B_RANDOMIZED_ANY_ADDRESS
:
190 searchEnd
= fEndAddress
;
197 // TODO: remove this again when vm86 mode is moved into the kernel
198 // completely (currently needs a userland address space!)
199 if (addressRestrictions
->address_specification
!= B_EXACT_ADDRESS
)
200 searchBase
= std::max(searchBase
, (addr_t
)USER_BASE_ANY
);
202 status
= _InsertAreaSlot(searchBase
, size
, searchEnd
,
203 addressRestrictions
->address_specification
,
204 addressRestrictions
->alignment
, area
, allocationFlags
);
205 if (status
== B_OK
) {
206 if (_address
!= NULL
)
207 *_address
= (void*)area
->Base();
208 fFreeSpace
-= area
->Size();
215 //! You must hold the address space's write lock.
217 VMUserAddressSpace::RemoveArea(VMArea
* _area
, uint32 allocationFlags
)
219 VMUserArea
* area
= static_cast<VMUserArea
*>(_area
);
223 if (area
->id
!= RESERVED_AREA_ID
) {
224 IncrementChangeCount();
225 fFreeSpace
+= area
->Size();
227 if (area
== fAreaHint
)
234 VMUserAddressSpace::CanResizeArea(VMArea
* area
, size_t newSize
)
236 VMUserArea
* next
= fAreas
.GetNext(static_cast<VMUserArea
*>(area
));
237 addr_t newEnd
= area
->Base() + (newSize
- 1);
240 return fEndAddress
>= newEnd
;
242 if (next
->Base() > newEnd
)
245 // If the area was created inside a reserved area, it can
246 // also be resized in that area
247 // TODO: if there is free space after the reserved area, it could
248 // be used as well...
249 return next
->id
== RESERVED_AREA_ID
250 && (uint64
)next
->cache_offset
<= (uint64
)area
->Base()
251 && next
->Base() + (next
->Size() - 1) >= newEnd
;
256 VMUserAddressSpace::ResizeArea(VMArea
* _area
, size_t newSize
,
257 uint32 allocationFlags
)
259 VMUserArea
* area
= static_cast<VMUserArea
*>(_area
);
261 addr_t newEnd
= area
->Base() + (newSize
- 1);
262 VMUserArea
* next
= fAreas
.GetNext(area
);
263 if (next
!= NULL
&& next
->Base() <= newEnd
) {
264 if (next
->id
!= RESERVED_AREA_ID
265 || (uint64
)next
->cache_offset
> (uint64
)area
->Base()
266 || next
->Base() + (next
->Size() - 1) < newEnd
) {
267 panic("resize situation for area %p has changed although we "
268 "should have the address space lock", area
);
272 // resize reserved area
273 addr_t offset
= area
->Base() + newSize
- next
->Base();
274 if (next
->Size() <= offset
) {
275 RemoveArea(next
, allocationFlags
);
277 free_etc(next
, allocationFlags
);
279 status_t error
= ShrinkAreaHead(next
, next
->Size() - offset
,
286 area
->SetSize(newSize
);
292 VMUserAddressSpace::ShrinkAreaHead(VMArea
* area
, size_t size
,
293 uint32 allocationFlags
)
295 size_t oldSize
= area
->Size();
299 area
->SetBase(area
->Base() + oldSize
- size
);
307 VMUserAddressSpace::ShrinkAreaTail(VMArea
* area
, size_t size
,
308 uint32 allocationFlags
)
310 size_t oldSize
= area
->Size();
321 VMUserAddressSpace::ReserveAddressRange(size_t size
,
322 const virtual_address_restrictions
* addressRestrictions
,
323 uint32 flags
, uint32 allocationFlags
, void** _address
)
325 // check to see if this address space has entered DELETE state
327 // okay, someone is trying to delete this address space now, so we
328 // can't insert the area, let's back out
329 return B_BAD_TEAM_ID
;
332 VMUserArea
* area
= VMUserArea::CreateReserved(this, flags
, allocationFlags
);
336 status_t status
= InsertArea(area
, size
, addressRestrictions
,
337 allocationFlags
, _address
);
338 if (status
!= B_OK
) {
340 free_etc(area
, allocationFlags
);
344 area
->cache_offset
= area
->Base();
345 // we cache the original base address here
353 VMUserAddressSpace::UnreserveAddressRange(addr_t address
, size_t size
,
354 uint32 allocationFlags
)
356 // check to see if this address space has entered DELETE state
358 // okay, someone is trying to delete this address space now, so we can't
359 // insert the area, so back out
360 return B_BAD_TEAM_ID
;
363 // search area list and remove any matching reserved ranges
364 addr_t endAddress
= address
+ (size
- 1);
365 for (VMUserAreaList::Iterator it
= fAreas
.GetIterator();
366 VMUserArea
* area
= it
.Next();) {
367 // the area must be completely part of the reserved range
368 if (area
->Base() + (area
->Size() - 1) > endAddress
)
370 if (area
->id
== RESERVED_AREA_ID
&& area
->Base() >= (addr_t
)address
) {
371 // remove reserved range
372 RemoveArea(area
, allocationFlags
);
375 free_etc(area
, allocationFlags
);
384 VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags
)
386 for (VMUserAreaList::Iterator it
= fAreas
.GetIterator();
387 VMUserArea
* area
= it
.Next();) {
388 if (area
->id
== RESERVED_AREA_ID
) {
389 RemoveArea(area
, allocationFlags
);
392 free_etc(area
, allocationFlags
);
399 VMUserAddressSpace::Dump() const
401 VMAddressSpace::Dump();
402 kprintf("area_hint: %p\n", fAreaHint
);
404 kprintf("area_list:\n");
406 for (VMUserAreaList::ConstIterator it
= fAreas
.GetIterator();
407 VMUserArea
* area
= it
.Next();) {
408 kprintf(" area 0x%" B_PRIx32
": ", area
->id
);
409 kprintf("base_addr = 0x%lx ", area
->Base());
410 kprintf("size = 0x%lx ", area
->Size());
411 kprintf("name = '%s' ", area
->name
);
412 kprintf("protection = 0x%" B_PRIx32
"\n", area
->protection
);
418 VMUserAddressSpace::_IsRandomized(uint32 addressSpec
) const
420 return fRandomizingEnabled
421 && (addressSpec
== B_RANDOMIZED_ANY_ADDRESS
422 || addressSpec
== B_RANDOMIZED_BASE_ADDRESS
);
427 VMUserAddressSpace::_RandomizeAddress(addr_t start
, addr_t end
,
428 size_t alignment
, bool initial
)
430 ASSERT((start
& addr_t(alignment
- 1)) == 0);
431 ASSERT(start
<= end
);
436 addr_t range
= end
- start
+ 1;
438 range
= std::min(range
, kMaxInitialRandomize
);
440 range
= std::min(range
, kMaxRandomize
);
442 addr_t random
= secure_get_random
<addr_t
>();
444 random
&= ~addr_t(alignment
- 1);
446 return start
+ random
;
450 /*! Finds a reserved area that covers the region spanned by \a start and
451 \a size, inserts the \a area into that region and makes sure that
452 there are reserved regions for the remaining parts.
455 VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start
, size_t size
,
456 VMUserArea
* area
, uint32 allocationFlags
)
460 for (VMUserAreaList::Iterator it
= fAreas
.GetIterator();
461 (next
= it
.Next()) != NULL
;) {
462 if (next
->Base() <= start
463 && next
->Base() + (next
->Size() - 1) >= start
+ (size
- 1)) {
464 // This area covers the requested range
465 if (next
->id
!= RESERVED_AREA_ID
) {
466 // but it's not reserved space, it's a real area
475 return B_ENTRY_NOT_FOUND
;
477 // Now we have to transfer the requested part of the reserved
478 // range to the new area - and remove, resize or split the old
481 if (start
== next
->Base()) {
482 // the area starts at the beginning of the reserved range
483 fAreas
.Insert(next
, area
);
485 if (size
== next
->Size()) {
486 // the new area fully covers the reversed range
490 free_etc(next
, allocationFlags
);
492 // resize the reserved range behind the area
493 next
->SetBase(next
->Base() + size
);
494 next
->SetSize(next
->Size() - size
);
496 } else if (start
+ size
== next
->Base() + next
->Size()) {
497 // the area is at the end of the reserved range
498 fAreas
.Insert(fAreas
.GetNext(next
), area
);
500 // resize the reserved range before the area
501 next
->SetSize(start
- next
->Base());
503 // the area splits the reserved range into two separate ones
504 // we need a new reserved area to cover this space
505 VMUserArea
* reserved
= VMUserArea::CreateReserved(this,
506 next
->protection
, allocationFlags
);
507 if (reserved
== NULL
)
511 fAreas
.Insert(fAreas
.GetNext(next
), reserved
);
512 fAreas
.Insert(reserved
, area
);
515 reserved
->SetSize(next
->Base() + next
->Size() - start
- size
);
516 next
->SetSize(start
- next
->Base());
517 reserved
->SetBase(start
+ size
);
518 reserved
->cache_offset
= next
->cache_offset
;
521 area
->SetBase(start
);
523 IncrementChangeCount();
529 /*! Must be called with this address space's write lock held */
531 VMUserAddressSpace::_InsertAreaSlot(addr_t start
, addr_t size
, addr_t end
,
532 uint32 addressSpec
, size_t alignment
, VMUserArea
* area
,
533 uint32 allocationFlags
)
535 VMUserArea
* last
= NULL
;
537 bool foundSpot
= false;
538 addr_t originalStart
= 0;
540 TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
541 "0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32
", area %p\n",
542 this, start
, size
, end
, addressSpec
, area
));
544 // do some sanity checking
545 if (start
< fBase
|| size
== 0 || end
> fEndAddress
546 || start
+ (size
- 1) > end
)
547 return B_BAD_ADDRESS
;
549 if (addressSpec
== B_EXACT_ADDRESS
&& area
->id
!= RESERVED_AREA_ID
) {
550 // search for a reserved area
551 status_t status
= _InsertAreaIntoReservedRegion(start
, size
, area
,
553 if (status
== B_OK
|| status
== B_BAD_VALUE
)
556 // There was no reserved area, and the slot doesn't seem to be used
558 // TODO: this could be further optimized.
562 alignment
= B_PAGE_SIZE
;
563 if (addressSpec
== B_ANY_KERNEL_BLOCK_ADDRESS
) {
564 // align the memory to the next power of two of the size
565 while (alignment
< size
)
569 start
= align_address(start
, alignment
);
571 if (fRandomizingEnabled
&& addressSpec
== B_RANDOMIZED_BASE_ADDRESS
) {
572 originalStart
= start
;
573 start
= _RandomizeAddress(start
, end
- size
+ 1, alignment
, true);
576 // walk up to the spot where we should start searching
578 VMUserAreaList::Iterator it
= fAreas
.GetIterator();
579 while ((next
= it
.Next()) != NULL
) {
580 if (next
->Base() > start
+ (size
- 1)) {
588 // find the right spot depending on the address specification - the area
589 // will be inserted directly after "last" ("next" is not referenced anymore)
591 switch (addressSpec
) {
593 case B_ANY_KERNEL_ADDRESS
:
594 case B_ANY_KERNEL_BLOCK_ADDRESS
:
595 case B_RANDOMIZED_ANY_ADDRESS
:
597 case B_RANDOMIZED_BASE_ADDRESS
:
599 // find a hole big enough for a new area
601 // see if we can build it at the beginning of the virtual map
602 addr_t alignedBase
= align_address(start
, alignment
);
603 addr_t nextBase
= next
== NULL
604 ? end
: std::min(next
->Base() - 1, end
);
605 if (is_valid_spot(start
, alignedBase
, size
, nextBase
)) {
606 addr_t rangeEnd
= std::min(nextBase
- size
+ 1, end
);
607 if (_IsRandomized(addressSpec
)) {
608 alignedBase
= _RandomizeAddress(alignedBase
, rangeEnd
,
613 area
->SetBase(alignedBase
);
622 while (next
!= NULL
&& next
->Base() + next
->Size() - 1 <= end
) {
623 addr_t alignedBase
= align_address(last
->Base() + last
->Size(),
624 alignment
, addressSpec
, start
);
625 addr_t nextBase
= std::min(end
, next
->Base() - 1);
627 if (is_valid_spot(last
->Base() + (last
->Size() - 1),
628 alignedBase
, size
, nextBase
)) {
629 addr_t rangeEnd
= std::min(nextBase
- size
+ 1, end
);
630 if (_IsRandomized(addressSpec
)) {
631 alignedBase
= _RandomizeAddress(alignedBase
,
632 rangeEnd
, alignment
);
636 area
->SetBase(alignedBase
);
647 addr_t alignedBase
= align_address(last
->Base() + last
->Size(),
648 alignment
, addressSpec
, start
);
650 if (next
== NULL
&& is_valid_spot(last
->Base() + (last
->Size() - 1),
651 alignedBase
, size
, end
)) {
652 if (_IsRandomized(addressSpec
)) {
653 alignedBase
= _RandomizeAddress(alignedBase
, end
- size
+ 1,
659 area
->SetBase(alignedBase
);
661 } else if (is_base_address_spec(addressSpec
)) {
662 // we didn't find a free spot in the requested range, so we'll
663 // try again without any restrictions
664 if (!_IsRandomized(addressSpec
)) {
665 start
= USER_BASE_ANY
;
666 addressSpec
= B_ANY_ADDRESS
;
667 } else if (start
== originalStart
) {
668 start
= USER_BASE_ANY
;
669 addressSpec
= B_RANDOMIZED_ANY_ADDRESS
;
671 start
= originalStart
;
672 addressSpec
= B_RANDOMIZED_BASE_ADDRESS
;
677 } else if (area
->id
!= RESERVED_AREA_ID
) {
678 // We didn't find a free spot - if there are any reserved areas,
679 // we can now test those for free space
680 // TODO: it would make sense to start with the biggest of them
683 for (last
= NULL
; next
!= NULL
; next
= it
.Next()) {
684 if (next
->id
!= RESERVED_AREA_ID
) {
687 } else if (next
->Base() + size
- 1 > end
)
690 // TODO: take free space after the reserved area into
692 addr_t alignedBase
= align_address(next
->Base(), alignment
);
693 if (next
->Base() == alignedBase
&& next
->Size() == size
) {
694 // The reserved area is entirely covered, and thus,
699 area
->SetBase(alignedBase
);
701 free_etc(next
, allocationFlags
);
705 if ((next
->protection
& RESERVED_AVOID_BASE
) == 0
706 && alignedBase
== next
->Base()
707 && next
->Size() >= size
) {
708 addr_t rangeEnd
= std::min(
709 next
->Base() + next
->Size() - size
, end
);
710 if (_IsRandomized(addressSpec
)) {
711 alignedBase
= _RandomizeAddress(next
->Base(),
712 rangeEnd
, alignment
);
714 addr_t offset
= alignedBase
- next
->Base();
716 // The new area will be placed at the beginning of the
717 // reserved area and the reserved area will be offset
720 next
->SetBase(next
->Base() + offset
+ size
);
721 next
->SetSize(next
->Size() - offset
- size
);
722 area
->SetBase(alignedBase
);
726 if (is_valid_spot(next
->Base(), alignedBase
, size
,
727 std::min(next
->Base() + next
->Size() - 1, end
))) {
728 // The new area will be placed at the end of the
729 // reserved area, and the reserved area will be resized
732 if (_IsRandomized(addressSpec
)) {
733 addr_t alignedNextBase
= align_address(next
->Base(),
736 addr_t startRange
= next
->Base() + next
->Size();
737 startRange
-= size
+ kMaxRandomize
;
738 startRange
= ROUNDDOWN(startRange
, alignment
);
739 startRange
= std::max(startRange
, alignedNextBase
);
742 = std::min(next
->Base() + next
->Size() - size
,
744 alignedBase
= _RandomizeAddress(startRange
,
745 rangeEnd
, alignment
);
747 alignedBase
= ROUNDDOWN(
748 next
->Base() + next
->Size() - size
, alignment
);
752 next
->SetSize(alignedBase
- next
->Base());
753 area
->SetBase(alignedBase
);
765 case B_EXACT_ADDRESS
:
766 // see if we can create it exactly here
767 if ((last
== NULL
|| last
->Base() + (last
->Size() - 1) < start
)
768 && (next
== NULL
|| next
->Base() > start
+ (size
- 1))) {
770 area
->SetBase(start
);
779 return addressSpec
== B_EXACT_ADDRESS
? B_BAD_VALUE
: B_NO_MEMORY
;
783 fAreas
.Insert(fAreas
.GetNext(last
), area
);
785 fAreas
.Insert(fAreas
.Head(), area
);
787 IncrementChangeCount();