/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#include "VMUserAddressSpace.h"

#include <stdlib.h>

#include <algorithm>

#include <KernelExport.h>

#include <heap.h>
#include <thread.h>
#include <util/atomic.h>
#include <util/Random.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
//#define TRACE_VM
#ifdef TRACE_VM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
#ifdef B_HAIKU_64_BIT
const addr_t VMUserAddressSpace::kMaxRandomize = 0x8000000000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x20000000000ul;
#else
const addr_t VMUserAddressSpace::kMaxRandomize = 0x800000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x2000000ul;
#endif
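// These constants cap the ASLR shift applied to mappings: kMaxRandomize
// bounds per-allocation randomization (8 MB on 32-bit, 512 GB on 64-bit)
// and kMaxInitialRandomize bounds the initial placement (32 MB and 2 TB
// respectively; these figures are just the hex values above converted).
// See _RandomizeAddress() below for how they limit the candidate range.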
/*!	Verifies that an area with the given aligned base and size fits into
	the spot defined by base and limit and checks for overflows.
*/
static inline bool
is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
{
	return (alignedBase >= base && alignedBase + (size - 1) > alignedBase
		&& alignedBase + (size - 1) <= limit);
}
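/*	Illustrative check of the middle term (example numbers, not from the
	original source): on a 32-bit build, alignedBase = 0xfffff000 with
	size = 0x2000 computes alignedBase + (size - 1) = 0x00000fff after
	wraparound, which is not greater than alignedBase, so the spot is
	correctly rejected as an overflow.
*/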
static inline bool
is_base_address_spec(uint32 addressSpec)
{
	return addressSpec == B_BASE_ADDRESS
		|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
}
static inline addr_t
align_address(addr_t address, size_t alignment)
{
	return ROUNDUP(address, alignment);
}
static inline addr_t
align_address(addr_t address, size_t alignment, uint32 addressSpec,
	addr_t baseAddress)
{
	if (is_base_address_spec(addressSpec))
		address = std::max(address, baseAddress);
	return align_address(address, alignment);
}
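/*	Usage sketch (illustrative, assuming B_PAGE_SIZE is 0x1000):
		align_address(0x12345, B_PAGE_SIZE);				// -> 0x13000
		align_address(0x12345, B_PAGE_SIZE,
			B_BASE_ADDRESS, 0x20000);						// -> 0x20000
	With a base address spec the address is first raised to the requested
	base, then rounded up to the alignment.
*/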
// #pragma mark - VMUserAddressSpace


VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
	:
	VMAddressSpace(id, base, size, "address space"),
	fAreaHint(NULL)
{
}


VMUserAddressSpace::~VMUserAddressSpace()
{
}
inline VMArea*
VMUserAddressSpace::FirstArea() const
{
	VMUserArea* area = fAreas.Head();
	while (area != NULL && area->id == RESERVED_AREA_ID)
		area = fAreas.GetNext(area);
	return area;
}
inline VMArea*
VMUserAddressSpace::NextArea(VMArea* _area) const
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area = fAreas.GetNext(area);
	while (area != NULL && area->id == RESERVED_AREA_ID)
		area = fAreas.GetNext(area);
	return area;
}
VMArea*
VMUserAddressSpace::CreateArea(const char* name, uint32 wiring,
	uint32 protection, uint32 allocationFlags)
{
	return VMUserArea::Create(this, name, wiring, protection, allocationFlags);
}
void
VMUserAddressSpace::DeleteArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);
	area->~VMUserArea();
	free_etc(area, allocationFlags);
}
//! You must hold the address space's read lock.
VMArea*
VMUserAddressSpace::LookupArea(addr_t address) const
{
	// check the area hint first
	VMArea* areaHint = atomic_pointer_get(&fAreaHint);
	if (areaHint != NULL && areaHint->ContainsAddress(address))
		return areaHint;

	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID)
			continue;

		if (area->ContainsAddress(address)) {
			atomic_pointer_set(&fAreaHint, area);
			return area;
		}
	}

	return NULL;
}
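// Note on the hint: LookupArea() runs under the read lock only, so the
// hint is read and published with atomic_pointer_get()/atomic_pointer_set()
// rather than plain loads and stores. Concurrent lookups may race to update
// it, but whichever pointer wins is a valid, still-inserted area, so the
// hint is only ever a performance optimization, never a correctness issue.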
/*!	This inserts the area you pass into the address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
	You need to hold the VMAddressSpace write lock.
*/
status_t
VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 allocationFlags, void** _address)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t searchBase, searchEnd;
	status_t status;

	switch (addressRestrictions->address_specification) {
		case B_EXACT_ADDRESS:
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = (addr_t)addressRestrictions->address + (size - 1);
			break;

		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
			searchBase = (addr_t)addressRestrictions->address;
			searchEnd = fEndAddress;
			break;

		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
			searchBase = fBase;
			searchEnd = fEndAddress;
			break;

		default:
			return B_BAD_VALUE;
	}

	// TODO: remove this again when vm86 mode is moved into the kernel
	// completely (currently needs a userland address space!)
	if (addressRestrictions->address_specification != B_EXACT_ADDRESS)
		searchBase = std::max(searchBase, (addr_t)USER_BASE_ANY);

	status = _InsertAreaSlot(searchBase, size, searchEnd,
		addressRestrictions->address_specification,
		addressRestrictions->alignment, area, allocationFlags);
	if (status == B_OK) {
		if (_address != NULL)
			*_address = (void*)area->Base();
		fFreeSpace -= area->Size();
	}

	return status;
}
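/*	Caller sketch (illustrative only; the field names are those used above):
	a typical caller fills in virtual_address_restrictions before handing
	the area in, e.g.
		virtual_address_restrictions restrictions = {};
		restrictions.address = (void*)0x20000000;
		restrictions.address_specification = B_BASE_ADDRESS;
		restrictions.alignment = 0;
			// 0 lets _InsertAreaSlot() fall back to B_PAGE_SIZE
	and on B_OK receives the chosen base back through "_address".
*/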
//! You must hold the address space's write lock.
void
VMUserAddressSpace::RemoveArea(VMArea* _area, uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	fAreas.Remove(area);

	if (area->id != RESERVED_AREA_ID) {
		IncrementChangeCount();
		fFreeSpace += area->Size();

		if (area == fAreaHint)
			fAreaHint = NULL;
	}
}
bool
VMUserAddressSpace::CanResizeArea(VMArea* area, size_t newSize)
{
	VMUserArea* next = fAreas.GetNext(static_cast<VMUserArea*>(area));
	addr_t newEnd = area->Base() + (newSize - 1);

	if (next == NULL)
		return fEndAddress >= newEnd;

	if (next->Base() > newEnd)
		return true;

	// If the area was created inside a reserved area, it can
	// also be resized in that area
	// TODO: if there is free space after the reserved area, it could
	// be used as well...
	return next->id == RESERVED_AREA_ID
		&& (uint64)next->cache_offset <= (uint64)area->Base()
		&& next->Base() + (next->Size() - 1) >= newEnd;
}
status_t
VMUserAddressSpace::ResizeArea(VMArea* _area, size_t newSize,
	uint32 allocationFlags)
{
	VMUserArea* area = static_cast<VMUserArea*>(_area);

	addr_t newEnd = area->Base() + (newSize - 1);
	VMUserArea* next = fAreas.GetNext(area);
	if (next != NULL && next->Base() <= newEnd) {
		if (next->id != RESERVED_AREA_ID
			|| (uint64)next->cache_offset > (uint64)area->Base()
			|| next->Base() + (next->Size() - 1) < newEnd) {
			panic("resize situation for area %p has changed although we "
				"should have the address space lock", area);
			return B_ERROR;
		}

		// resize reserved area
		addr_t offset = area->Base() + newSize - next->Base();
		if (next->Size() <= offset) {
			RemoveArea(next, allocationFlags);
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			status_t error = ShrinkAreaHead(next, next->Size() - offset,
				allocationFlags);
			if (error != B_OK)
				return error;
		}
	}

	area->SetSize(newSize);
	return B_OK;
}
status_t
VMUserAddressSpace::ShrinkAreaHead(VMArea* area, size_t size,
	uint32 allocationFlags)
{
	size_t oldSize = area->Size();
	if (size == oldSize)
		return B_OK;

	area->SetBase(area->Base() + oldSize - size);
	area->SetSize(size);

	return B_OK;
}
status_t
VMUserAddressSpace::ShrinkAreaTail(VMArea* area, size_t size,
	uint32 allocationFlags)
{
	size_t oldSize = area->Size();
	if (size == oldSize)
		return B_OK;

	area->SetSize(size);

	return B_OK;
}
status_t
VMUserAddressSpace::ReserveAddressRange(size_t size,
	const virtual_address_restrictions* addressRestrictions,
	uint32 flags, uint32 allocationFlags, void** _address)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	VMUserArea* area = VMUserArea::CreateReserved(this, flags, allocationFlags);
	if (area == NULL)
		return B_NO_MEMORY;

	status_t status = InsertArea(area, size, addressRestrictions,
		allocationFlags, _address);
	if (status != B_OK) {
		area->~VMUserArea();
		free_etc(area, allocationFlags);
		return status;
	}

	area->cache_offset = area->Base();
		// we cache the original base address here

	Get();
	return B_OK;
}
status_t
VMUserAddressSpace::UnreserveAddressRange(addr_t address, size_t size,
	uint32 allocationFlags)
{
	// check to see if this address space has entered DELETE state
	if (fDeleting) {
		// okay, someone is trying to delete this address space now, so we
		// can't insert the area, let's back out
		return B_BAD_TEAM_ID;
	}

	// search area list and remove any matching reserved ranges
	addr_t endAddress = address + (size - 1);
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		// the area must be completely part of the reserved range
		if (area->Base() + (area->Size() - 1) > endAddress)
			break;
		if (area->id == RESERVED_AREA_ID && area->Base() >= (addr_t)address) {
			// remove reserved range
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}

	return B_OK;
}
void
VMUserAddressSpace::UnreserveAllAddressRanges(uint32 allocationFlags)
{
	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		if (area->id == RESERVED_AREA_ID) {
			RemoveArea(area, allocationFlags);
			Put();
			area->~VMUserArea();
			free_etc(area, allocationFlags);
		}
	}
}
void
VMUserAddressSpace::Dump() const
{
	VMAddressSpace::Dump();
	kprintf("area_hint: %p\n", fAreaHint);

	kprintf("area_list:\n");

	for (VMUserAreaList::ConstIterator it = fAreas.GetIterator();
			VMUserArea* area = it.Next();) {
		kprintf(" area 0x%" B_PRIx32 ": ", area->id);
		kprintf("base_addr = 0x%lx ", area->Base());
		kprintf("size = 0x%lx ", area->Size());
		kprintf("name = '%s' ", area->name);
		kprintf("protection = 0x%" B_PRIx32 "\n", area->protection);
	}
}
inline bool
VMUserAddressSpace::_IsRandomized(uint32 addressSpec) const
{
	return fRandomizingEnabled
		&& (addressSpec == B_RANDOMIZED_ANY_ADDRESS
			|| addressSpec == B_RANDOMIZED_BASE_ADDRESS);
}
addr_t
VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
	size_t alignment, bool initial)
{
	ASSERT((start & addr_t(alignment - 1)) == 0);
	ASSERT(start <= end);

	if (start == end)
		return start;

	addr_t range = end - start + 1;
	if (initial)
		range = std::min(range, kMaxInitialRandomize);
	else
		range = std::min(range, kMaxRandomize);

	addr_t random = secure_get_random<addr_t>();
	random %= range;
	random &= ~addr_t(alignment - 1);

	return start + random;
}
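/*	Worked example (illustrative numbers): with start = 0x10000000,
	end = 0x10ffffff and alignment = B_PAGE_SIZE (0x1000), range is
	0x1000000. A raw random value of 0x123456 yields
	0x123456 % 0x1000000 = 0x123456, which is masked down to 0x123000,
	so the function returns 0x10123000: page aligned and within
	[start, end].
*/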
/*!	Finds a reserved area that covers the region spanned by \a start and
	\a size, inserts the \a area into that region and makes sure that
	there are reserved regions for the remaining parts.
*/
status_t
VMUserAddressSpace::_InsertAreaIntoReservedRegion(addr_t start, size_t size,
	VMUserArea* area, uint32 allocationFlags)
{
	VMUserArea* next;

	for (VMUserAreaList::Iterator it = fAreas.GetIterator();
			(next = it.Next()) != NULL;) {
		if (next->Base() <= start
			&& next->Base() + (next->Size() - 1) >= start + (size - 1)) {
			// This area covers the requested range
			if (next->id != RESERVED_AREA_ID) {
				// but it's not reserved space, it's a real area
				return B_BAD_VALUE;
			}

			break;
		}
	}

	if (next == NULL)
		return B_ENTRY_NOT_FOUND;

	// Now we have to transfer the requested part of the reserved
	// range to the new area - and remove, resize or split the old
	// reserved area.

	if (start == next->Base()) {
		// the area starts at the beginning of the reserved range
		fAreas.Insert(next, area);

		if (size == next->Size()) {
			// the new area fully covers the reserved range
			fAreas.Remove(next);
			Put();
			next->~VMUserArea();
			free_etc(next, allocationFlags);
		} else {
			// resize the reserved range behind the area
			next->SetBase(next->Base() + size);
			next->SetSize(next->Size() - size);
		}
	} else if (start + size == next->Base() + next->Size()) {
		// the area is at the end of the reserved range
		fAreas.Insert(fAreas.GetNext(next), area);

		// resize the reserved range before the area
		next->SetSize(start - next->Base());
	} else {
		// the area splits the reserved range into two separate ones
		// we need a new reserved area to cover this space
		VMUserArea* reserved = VMUserArea::CreateReserved(this,
			next->protection, allocationFlags);
		if (reserved == NULL)
			return B_NO_MEMORY;

		Get();
		fAreas.Insert(fAreas.GetNext(next), reserved);
		fAreas.Insert(reserved, area);

		// resize regions
		reserved->SetSize(next->Base() + next->Size() - start - size);
		next->SetSize(start - next->Base());
		reserved->SetBase(start + size);
		reserved->cache_offset = next->cache_offset;
	}

	area->SetBase(start);
	area->SetSize(size);
	IncrementChangeCount();

	return B_OK;
}
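/*	The three cases above, sketched (A = newly inserted area, R = remaining
	reserved space):
		before:   [          reserved          ]
		case 1:   [ A ][           R           ]   start == next->Base()
		case 2:   [           R           ][ A ]   range ends with the area
		case 3:   [   R    ][ A ][   R (new)   ]   area splits the range
*/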
/*!	Must be called with this address space's write lock held */
status_t
VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
	uint32 addressSpec, size_t alignment, VMUserArea* area,
	uint32 allocationFlags)
{
	VMUserArea* last = NULL;
	VMUserArea* next;
	bool foundSpot = false;
	addr_t originalStart = 0;

	TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
		"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
		this, start, size, end, addressSpec, area));

	// do some sanity checking
	if (start < fBase || size == 0 || end > fEndAddress
		|| start + (size - 1) > end)
		return B_BAD_ADDRESS;

	if (addressSpec == B_EXACT_ADDRESS && area->id != RESERVED_AREA_ID) {
		// search for a reserved area
		status_t status = _InsertAreaIntoReservedRegion(start, size, area,
			allocationFlags);
		if (status == B_OK || status == B_BAD_VALUE)
			return status;

		// There was no reserved area, and the slot doesn't seem to be used
		// already
		// TODO: this could be further optimized.
	}

	if (alignment == 0)
		alignment = B_PAGE_SIZE;
	if (addressSpec == B_ANY_KERNEL_BLOCK_ADDRESS) {
		// align the memory to the next power of two of the size
		while (alignment < size)
			alignment <<= 1;
	}

	start = align_address(start, alignment);

	if (fRandomizingEnabled && addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
		originalStart = start;
		start = _RandomizeAddress(start, end - size + 1, alignment, true);
	}

	// walk up to the spot where we should start searching
second_chance:
	VMUserAreaList::Iterator it = fAreas.GetIterator();
	while ((next = it.Next()) != NULL) {
		if (next->Base() > start + (size - 1)) {
			// we have a winner
			break;
		}

		last = next;
	}

	// find the right spot depending on the address specification - the area
	// will be inserted directly after "last" ("next" is not referenced anymore)

	switch (addressSpec) {
		case B_ANY_ADDRESS:
		case B_ANY_KERNEL_ADDRESS:
		case B_ANY_KERNEL_BLOCK_ADDRESS:
		case B_RANDOMIZED_ANY_ADDRESS:
		case B_BASE_ADDRESS:
		case B_RANDOMIZED_BASE_ADDRESS:
		{
			// find a hole big enough for a new area
			if (last == NULL) {
				// see if we can build it at the beginning of the virtual map
				addr_t alignedBase = align_address(start, alignment);
				addr_t nextBase = next == NULL
					? end : std::min(next->Base() - 1, end);
				if (is_valid_spot(start, alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
							alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			// keep walking
			while (next != NULL && next->Base() + next->Size() - 1 <= end) {
				addr_t alignedBase = align_address(last->Base() + last->Size(),
					alignment, addressSpec, start);
				addr_t nextBase = std::min(end, next->Base() - 1);

				if (is_valid_spot(last->Base() + (last->Size() - 1),
						alignedBase, size, nextBase)) {
					addr_t rangeEnd = std::min(nextBase - size + 1, end);
					if (_IsRandomized(addressSpec)) {
						alignedBase = _RandomizeAddress(alignedBase,
							rangeEnd, alignment);
					}

					foundSpot = true;
					area->SetBase(alignedBase);
					break;
				}

				last = next;
				next = it.Next();
			}

			if (foundSpot)
				break;

			addr_t alignedBase = align_address(last->Base() + last->Size(),
				alignment, addressSpec, start);

			if (next == NULL && is_valid_spot(last->Base() + (last->Size() - 1),
					alignedBase, size, end)) {
				if (_IsRandomized(addressSpec)) {
					alignedBase = _RandomizeAddress(alignedBase, end - size + 1,
						alignment);
				}

				// got a spot
				foundSpot = true;
				area->SetBase(alignedBase);
				break;
			} else if (is_base_address_spec(addressSpec)) {
				// we didn't find a free spot in the requested range, so we'll
				// try again without any restrictions
				if (!_IsRandomized(addressSpec)) {
					start = USER_BASE_ANY;
					addressSpec = B_ANY_ADDRESS;
				} else if (start == originalStart) {
					start = USER_BASE_ANY;
					addressSpec = B_RANDOMIZED_ANY_ADDRESS;
				} else {
					start = originalStart;
					addressSpec = B_RANDOMIZED_BASE_ADDRESS;
				}

				last = NULL;
				goto second_chance;
			} else if (area->id != RESERVED_AREA_ID) {
				// We didn't find a free spot - if there are any reserved areas,
				// we can now test those for free space
				// TODO: it would make sense to start with the biggest of them
				it.Rewind();
				next = it.Next();
				for (last = NULL; next != NULL; next = it.Next()) {
					if (next->id != RESERVED_AREA_ID) {
						last = next;
						continue;
					} else if (next->Base() + size - 1 > end)
						break;

					// TODO: take free space after the reserved area into
					// account!
					addr_t alignedBase = align_address(next->Base(), alignment);
					if (next->Base() == alignedBase && next->Size() == size) {
						// The reserved area is entirely covered, and thus,
						// removed
						fAreas.Remove(next);

						foundSpot = true;
						area->SetBase(alignedBase);
						next->~VMUserArea();
						free_etc(next, allocationFlags);
						break;
					}

					if ((next->protection & RESERVED_AVOID_BASE) == 0
						&& alignedBase == next->Base()
						&& next->Size() >= size) {
						addr_t rangeEnd = std::min(
							next->Base() + next->Size() - size, end);
						if (_IsRandomized(addressSpec)) {
							alignedBase = _RandomizeAddress(next->Base(),
								rangeEnd, alignment);
						}
						addr_t offset = alignedBase - next->Base();

						// The new area will be placed at the beginning of the
						// reserved area and the reserved area will be offset
						// and resized
						foundSpot = true;
						next->SetBase(next->Base() + offset + size);
						next->SetSize(next->Size() - offset - size);
						area->SetBase(alignedBase);
						break;
					}

					if (is_valid_spot(next->Base(), alignedBase, size,
							std::min(next->Base() + next->Size() - 1, end))) {
						// The new area will be placed at the end of the
						// reserved area, and the reserved area will be resized
						// to make space

						if (_IsRandomized(addressSpec)) {
							addr_t alignedNextBase = align_address(next->Base(),
								alignment);

							addr_t startRange = next->Base() + next->Size();
							startRange -= size + kMaxRandomize;
							startRange = ROUNDDOWN(startRange, alignment);
							startRange = std::max(startRange, alignedNextBase);

							addr_t rangeEnd
								= std::min(next->Base() + next->Size() - size,
									end);
							alignedBase = _RandomizeAddress(startRange,
								rangeEnd, alignment);
						} else {
							alignedBase = ROUNDDOWN(
								next->Base() + next->Size() - size, alignment);
						}

						foundSpot = true;
						next->SetSize(alignedBase - next->Base());
						area->SetBase(alignedBase);
						last = next;
						break;
					}

					last = next;
				}
			}

			break;
		}

		case B_EXACT_ADDRESS:
			// see if we can create it exactly here
			if ((last == NULL || last->Base() + (last->Size() - 1) < start)
				&& (next == NULL || next->Base() > start + (size - 1))) {
				foundSpot = true;
				area->SetBase(start);
				break;
			}

			break;
		default:
			return B_BAD_VALUE;
	}

	if (!foundSpot)
		return addressSpec == B_EXACT_ADDRESS ? B_BAD_VALUE : B_NO_MEMORY;

	area->SetSize(size);
	if (last)
		fAreas.Insert(fAreas.GetNext(last), area);
	else
		fAreas.Insert(fAreas.Head(), area);

	IncrementChangeCount();
	return B_OK;
}