/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Distributed under the terms of the MIT License.
 */


#include "MemoryManager.h"

#include <util/AutoLock.h>

#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>

#include "kernel_debug_config.h"

#include "ObjectCache.h"


//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
#	define TRACE(x...)	dprintf(x)
#else
#	define TRACE(x...)	do {} while (false)
#endif

#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS
#	define PARANOID_CHECKS_ONLY(x)	x
#else
#	define PARANOID_CHECKS_ONLY(x)
#endif

static const char* const kSlabAreaName = "slab area";

static void* sAreaTableBuffer[1024];

mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
int MemoryManager::sFreeAreaCount;
MemoryManager::MetaChunkList MemoryManager::sFreeCompleteMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sFreeShortMetaChunks;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksSmall;
MemoryManager::MetaChunkList MemoryManager::sPartialMetaChunksMedium;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
bool MemoryManager::sMaintenanceNeeded;

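// Locking: sLock serializes all allocator bookkeeping -- the meta chunk
// lists, the per-area chunk state, the free area reserve, and the
// allocation entries used while creating new areas. sAreaTableLock is a
// read/write lock that only guards the base address -> Area hash table, so
// lookups by address can run concurrently with each other.
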
RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)


// #pragma mark - kernel tracing


#if SLAB_MEMORY_MANAGER_TRACING


//namespace SlabMemoryManagerCacheTracing {
struct MemoryManager::Tracing {

class MemoryManagerTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
public:
	MemoryManagerTraceEntry()
		:
		TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
	{
	}
};


class Allocate : public MemoryManagerTraceEntry {
public:
	Allocate(ObjectCache* cache, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fCache(cache),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc: cache: %p, flags: %#" B_PRIx32,
			fCache, fFlags);
	}

private:
	ObjectCache*	fCache;
	uint32			fFlags;
};


class Free : public MemoryManagerTraceEntry {
public:
	Free(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free: address: %p, flags: %#" B_PRIx32,
			fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateRaw : public MemoryManagerTraceEntry {
public:
	AllocateRaw(size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc raw: size: %" B_PRIuSIZE
			", flags: %#" B_PRIx32, fSize, fFlags);
	}

private:
	size_t	fSize;
	uint32	fFlags;
};


class FreeRawOrReturnCache : public MemoryManagerTraceEntry {
public:
	FreeRawOrReturnCache(void* address, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free raw/return: address: %p, flags: %#"
			B_PRIx32, fAddress, fFlags);
	}

private:
	void*	fAddress;
	uint32	fFlags;
};


class AllocateArea : public MemoryManagerTraceEntry {
public:
	AllocateArea(Area* area, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc area: flags: %#" B_PRIx32
			" -> %p", fFlags, fArea);
	}

private:
	Area*	fArea;
	uint32	fFlags;
};


class AddArea : public MemoryManagerTraceEntry {
public:
	AddArea(Area* area)
		:
		MemoryManagerTraceEntry(),
		fArea(area)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager add area: %p", fArea);
	}

private:
	Area*	fArea;
};


class FreeArea : public MemoryManagerTraceEntry {
public:
	FreeArea(Area* area, bool areaRemoved, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fArea(area),
		fFlags(flags),
		fRemoved(areaRemoved)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free area: %p%s, flags: %#" B_PRIx32,
			fArea, fRemoved ? " (removed)" : "", fFlags);
	}

private:
	Area*	fArea;
	uint32	fFlags;
	bool	fRemoved;
};


class AllocateMetaChunk : public MemoryManagerTraceEntry {
public:
	AllocateMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class FreeMetaChunk : public MemoryManagerTraceEntry {
public:
	FreeMetaChunk(MetaChunk* metaChunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free meta chunk: %#" B_PRIxADDR,
			fMetaChunk);
	}

private:
	addr_t	fMetaChunk;
};


class AllocateChunk : public MemoryManagerTraceEntry {
public:
	AllocateChunk(size_t chunkSize, MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fChunkSize(chunkSize),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunk: size: %" B_PRIuSIZE
			" -> meta chunk: %#" B_PRIxADDR ", chunk: %" B_PRIu32, fChunkSize,
			fMetaChunk, fChunk);
	}

private:
	size_t	fChunkSize;
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class AllocateChunks : public MemoryManagerTraceEntry {
public:
	AllocateChunks(size_t chunkSize, uint32 chunkCount, MetaChunk* metaChunk,
		Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunkSize(chunkSize),
		fChunkCount(chunkCount),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager alloc chunks: size: %" B_PRIuSIZE
			", count %" B_PRIu32 " -> meta chunk: %#" B_PRIxADDR ", chunk: %"
			B_PRIu32, fChunkSize, fChunkCount, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	size_t	fChunkSize;
	uint32	fChunkCount;
	uint32	fChunk;
};


class FreeChunk : public MemoryManagerTraceEntry {
public:
	FreeChunk(MetaChunk* metaChunk, Chunk* chunk)
		:
		MemoryManagerTraceEntry(),
		fMetaChunk(metaChunk->chunkBase),
		fChunk(chunk - metaChunk->chunks)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager free chunk: meta chunk: %#" B_PRIxADDR
			", chunk: %" B_PRIu32, fMetaChunk, fChunk);
	}

private:
	addr_t	fMetaChunk;
	uint32	fChunk;
};


class Map : public MemoryManagerTraceEntry {
public:
	Map(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager map: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


class Unmap : public MemoryManagerTraceEntry {
public:
	Unmap(addr_t address, size_t size, uint32 flags)
		:
		MemoryManagerTraceEntry(),
		fAddress(address),
		fSize(size),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("slab memory manager unmap: %#" B_PRIxADDR ", size: %"
			B_PRIuSIZE ", flags: %#" B_PRIx32, fAddress, fSize, fFlags);
	}

private:
	addr_t	fAddress;
	size_t	fSize;
	uint32	fFlags;
};


//}	// namespace SlabMemoryManagerCacheTracing
};	// struct MemoryManager::Tracing


//#	define T(x)	new(std::nothrow) SlabMemoryManagerCacheTracing::x
#	define T(x)	new(std::nothrow) MemoryManager::Tracing::x

#else
#	define T(x)
#endif	// SLAB_MEMORY_MANAGER_TRACING


// #pragma mark - MemoryManager


/*static*/ void
MemoryManager::Init(kernel_args* args)
{
	mutex_init(&sLock, "slab memory manager");
	rw_lock_init(&sAreaTableLock, "slab memory manager area table");
	sKernelArgs = args;

	new(&sFreeCompleteMetaChunks) MetaChunkList;
	new(&sFreeShortMetaChunks) MetaChunkList;
	new(&sPartialMetaChunksSmall) MetaChunkList;
	new(&sPartialMetaChunksMedium) MetaChunkList;

	new(&sAreaTable) AreaTable;
	sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
		// A bit hacky: The table now owns the memory. Since we never resize or
		// free it, that's not a problem, though.

	sMaintenanceNeeded = false;
}


/*static*/ void
MemoryManager::InitPostArea()
{
	// Convert all areas to actual areas. This loop might look a bit weird, but
	// is necessary since creating the actual area involves memory allocations,
	// which in turn can change the situation.
	bool done;
	do {
		done = true;

		for (AreaTable::Iterator it = sAreaTable.GetIterator();
				Area* area = it.Next();) {
			if (area->vmArea == NULL) {
				_ConvertEarlyArea(area);
				done = false;
				break;
			}
		}
	} while (!done);

	// unmap and free unused pages
	if (sFreeAreas != NULL) {
		// Just "leak" all but the first of the free areas -- the VM will
		// automatically free all unclaimed memory.
		sFreeAreas->next = NULL;
		sFreeAreaCount = 1;

		Area* area = sFreeAreas;
		_ConvertEarlyArea(area);
		_UnmapFreeChunksEarly(area);
	}

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		_UnmapFreeChunksEarly(area);
	}

	sMaintenanceNeeded = true;
		// might not be necessary, but does no harm

	add_debugger_command_etc("slab_area", &_DumpArea,
		"Dump information on a given slab area",
		"[ -c ] <area>\n"
		"Dump information on a given slab area specified by its base "
		"address.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
		"well.\n", 0);
	add_debugger_command_etc("slab_areas", &_DumpAreas,
		"List all slab areas",
		"\n"
		"Lists all slab areas.\n", 0);
	add_debugger_command_etc("slab_meta_chunk", &_DumpMetaChunk,
		"Dump information on a given slab meta chunk",
		"<meta chunk>\n"
		"Dump information on a given slab meta chunk specified by its base "
		"or object address.\n", 0);
	add_debugger_command_etc("slab_meta_chunks", &_DumpMetaChunks,
		"List all non-full slab meta chunks",
		"[ -c ]\n"
		"Lists all non-full slab meta chunks.\n"
		"If \"-c\" is given, the chunks of all meta chunks are printed as "
		"well.\n", 0);
	add_debugger_command_etc("slab_raw_allocations", &_DumpRawAllocations,
		"List all raw allocations in slab areas",
		"\n"
		"Lists all raw allocations in slab areas.\n", 0);
}


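// Allocation front end for ObjectCache-backed slabs: each call hands out
// exactly one chunk of cache->slab_size and tags it with the owning cache
// by storing the cache pointer in chunk->reference, which is how
// FreeRawOrReturnCache() and CacheForAddress() later map an address back to
// its cache.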
/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
	// TODO: Support CACHE_UNLOCKED_PAGES!

	T(Allocate(cache, flags));

	size_t chunkSize = cache->slab_size;

	TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
		B_PRIuSIZE "\n", cache, flags, chunkSize);

	MutexLocker locker(sLock);

	// allocate a chunk
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, 1, flags, metaChunk, chunk);
	if (error != B_OK)
		return error;

	// map the chunk
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunk
		_FreeChunk(area, metaChunk, chunk, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)cache;
	_pages = (void*)chunkAddress;

	TRACE("MemoryManager::Allocate() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


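// Slab areas are SLAB_AREA_SIZE aligned, so for any address inside an area
// the Area structure, the containing meta chunk ((address % SLAB_AREA_SIZE)
// / SLAB_CHUNK_SIZE_LARGE) and the chunk index can be derived by plain
// address arithmetic, without consulting the area table. Free() relies on
// exactly that.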
/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
	TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);

	T(Free(pages, flags));

	// get the area and the meta chunk
	Area* area = _AreaForAddress((addr_t)pages);
	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	ASSERT(((addr_t)pages % metaChunk->chunkSize) == 0);

	// get the chunk
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	ASSERT(chunk->next != NULL);
	ASSERT(chunk->next < metaChunk->chunks
		|| chunk->next
			>= metaChunk->chunks + SLAB_SMALL_CHUNKS_PER_META_CHUNK);

	// and free it
	MutexLocker locker(sLock);
	_FreeChunk(area, metaChunk, chunk, (addr_t)pages, false, flags);
}


/*static*/ status_t
MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
{
#if SLAB_MEMORY_MANAGER_TRACING
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
	size += sizeof(AllocationTrackingInfo);
#else
	T(AllocateRaw(size, flags));
#endif
#endif

	size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);

	TRACE("MemoryManager::AllocateRaw(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n", size,
		flags);

	if (size > SLAB_CHUNK_SIZE_LARGE || (flags & CACHE_ALIGN_ON_SIZE) != 0) {
		// Requested size greater than a large chunk or an aligned allocation.
		// Allocate as an area.
		if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
			return B_WOULD_BLOCK;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification
			= (flags & CACHE_ALIGN_ON_SIZE) != 0
				? B_ANY_KERNEL_BLOCK_ADDRESS : B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		area_id area = create_area_etc(VMAddressSpace::KernelID(),
			"slab large raw allocation", size, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
					? CREATE_AREA_DONT_WAIT : 0)
				| CREATE_AREA_DONT_CLEAR, 0,
			&virtualRestrictions, &physicalRestrictions, &_pages);

		status_t result = area >= 0 ? B_OK : area;
		if (result == B_OK) {
			fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
			_AddTrackingInfo(_pages, size, traceEntry);
#endif
		}

		return result;
	}

	// determine chunk size (small or medium)
	size_t chunkSize = SLAB_CHUNK_SIZE_SMALL;
	uint32 chunkCount = size / SLAB_CHUNK_SIZE_SMALL;

	if (size % SLAB_CHUNK_SIZE_MEDIUM == 0) {
		chunkSize = SLAB_CHUNK_SIZE_MEDIUM;
		chunkCount = size / SLAB_CHUNK_SIZE_MEDIUM;
	}

	MutexLocker locker(sLock);

	// allocate the chunks
	MetaChunk* metaChunk;
	Chunk* chunk;
	status_t error = _AllocateChunks(chunkSize, chunkCount, flags, metaChunk,
		chunk);
	if (error != B_OK)
		return error;

	// map the chunks
	Area* area = metaChunk->GetArea();
	addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);

	locker.Unlock();
	error = _MapChunk(area->vmArea, chunkAddress, size, 0, flags);
	locker.Lock();
	if (error != B_OK) {
		// something failed -- free the chunks
		for (uint32 i = 0; i < chunkCount; i++)
			_FreeChunk(area, metaChunk, chunk + i, chunkAddress, true, flags);
		return error;
	}

	chunk->reference = (addr_t)chunkAddress + size - 1;
	_pages = (void*)chunkAddress;

	fill_allocated_block(_pages, size);
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
	_AddTrackingInfo(_pages, size, traceEntry);
#endif

	TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
		_pages, int(metaChunk - area->metaChunks),
		int(chunk - metaChunk->chunks));
	return B_OK;
}


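// chunk->reference doubles as a type tag: for cache-backed chunks it holds
// the ObjectCache pointer (an even address), for raw multi-chunk
// allocations it holds the address of the allocation's last byte (an odd
// value, since sizes are multiples of SLAB_CHUNK_SIZE_SMALL), and a value
// of 1 marks a chunk that is allocated but not yet tagged. The low bit is
// what FreeRawOrReturnCache() and GetAllocationInfo() test.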
/*static*/ ObjectCache*
MemoryManager::FreeRawOrReturnCache(void* pages, uint32 flags)
{
	TRACE("MemoryManager::FreeRawOrReturnCache(%p, %#" B_PRIx32 ")\n", pages,
		flags);

	T(FreeRawOrReturnCache(pages, flags));

	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)pages);

	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(areaBase);
	readLocker.Unlock();

	if (area == NULL) {
		// Probably a large allocation. Look up the VM area.
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)pages);
		addressSpace->ReadUnlock();

		if (area != NULL && (addr_t)pages == area->Base())
			delete_area(area->id);
		else
			panic("freeing unknown block %p from area %p", pages, area);

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)pages % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)pages >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)pages);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	// Seems we have a raw chunk allocation.
	ASSERT((addr_t)pages == _ChunkAddress(metaChunk, chunk));
	ASSERT(reference > (addr_t)pages);
	ASSERT(reference <= areaBase + SLAB_AREA_SIZE - 1);
	size_t size = reference - (addr_t)pages + 1;
	ASSERT((size % SLAB_CHUNK_SIZE_SMALL) == 0);

	// unmap the chunks
	_UnmapChunk(area->vmArea, (addr_t)pages, size, flags);

	// and free them
	MutexLocker locker(sLock);
	uint32 chunkCount = size / metaChunk->chunkSize;
	for (uint32 i = 0; i < chunkCount; i++)
		_FreeChunk(area, metaChunk, chunk + i, (addr_t)pages, true, flags);

	return NULL;
}


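// Rounds a requested slab size up to the smallest chunk tier (small,
// medium, or large) that can hold it, so that a single chunk can back each
// slab handed out by Allocate().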
/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
	if (size <= SLAB_CHUNK_SIZE_SMALL)
		return SLAB_CHUNK_SIZE_SMALL;
	if (size <= SLAB_CHUNK_SIZE_MEDIUM)
		return SLAB_CHUNK_SIZE_MEDIUM;
	return SLAB_CHUNK_SIZE_LARGE;
}


/*static*/ ObjectCache*
MemoryManager::GetAllocationInfo(void* address, size_t& _size)
{
	// get the area
	ReadLocker readLocker(sAreaTableLock);
	Area* area = sAreaTable.Lookup(_AreaBaseAddressForAddress((addr_t)address));
	readLocker.Unlock();

	if (area == NULL) {
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		addressSpace->ReadLock();
		VMArea* area = addressSpace->LookupArea((addr_t)address);
		if (area != NULL && (addr_t)address == area->Base())
			_size = area->Size();
		else
			_size = 0;
		addressSpace->ReadUnlock();

		return NULL;
	}

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	ASSERT(metaChunk->chunkSize > 0);
	ASSERT((addr_t)address >= metaChunk->chunkBase);
	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);

	addr_t reference = metaChunk->chunks[chunkIndex].reference;
	if ((reference & 1) == 0) {
		ObjectCache* cache = (ObjectCache*)reference;
		_size = cache->object_size;
		return cache;
	}

	_size = reference - (addr_t)address + 1;
	return NULL;
}


*
808 MemoryManager::CacheForAddress(void* address
)
811 ReadLocker
readLocker(sAreaTableLock
);
812 Area
* area
= sAreaTable
.Lookup(_AreaBaseAddressForAddress((addr_t
)address
));
818 MetaChunk
* metaChunk
= &area
->metaChunks
[
819 ((addr_t
)address
% SLAB_AREA_SIZE
) / SLAB_CHUNK_SIZE_LARGE
];
822 ASSERT(metaChunk
->chunkSize
> 0);
823 ASSERT((addr_t
)address
>= metaChunk
->chunkBase
);
824 uint16 chunkIndex
= _ChunkIndexForAddress(metaChunk
, (addr_t
)address
);
826 addr_t reference
= metaChunk
->chunks
[chunkIndex
].reference
;
827 return (reference
& 1) == 0 ? (ObjectCache
*)reference
: NULL
;
/*static*/ void
MemoryManager::PerformMaintenance()
{
	MutexLocker locker(sLock);

	while (sMaintenanceNeeded) {
		sMaintenanceNeeded = false;

		// We want to keep one or two areas as a reserve. This way we have at
		// least one area to use in situations when we aren't allowed to
		// allocate one and also avoid ping-pong effects.
		if (sFreeAreaCount > 0 && sFreeAreaCount <= 2)
			break;

		if (sFreeAreaCount == 0) {
			// try to allocate one
			Area* area;
			if (_AllocateArea(0, area) != B_OK)
				return;

			_PushFreeArea(area);
			if (sFreeAreaCount > 2)
				sMaintenanceNeeded = true;
		} else {
			// free until we only have two free ones
			while (sFreeAreaCount > 2)
				_FreeArea(_PopFreeArea(), true, 0);

			if (sFreeAreaCount == 0)
				sMaintenanceNeeded = true;
		}
	}
}


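// PerformMaintenance() above is requested via _RequestMaintenance(), which
// calls request_memory_manager_maintenance() whenever the free area reserve
// leaves the desired range of one to two areas. It runs with flags == 0,
// i.e. in a context that is allowed to create and delete VM areas.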
#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

/*static*/ bool
MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
{
	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks and chunks that aren't raw allocations
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;

				if (!callback.ProcessTrackingInfo(
						_TrackingInfoFor((void*)chunkAddress, size),
						(void*)chunkAddress, size)) {
					return false;
				}
			}
		}
	}

	return true;
}

#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


/*static*/ ObjectCache*
MemoryManager::DebugObjectCacheForAddress(void* address)
{
	// get the area
	addr_t areaBase = _AreaBaseAddressForAddress((addr_t)address);
	Area* area = sAreaTable.Lookup(areaBase);

	if (area == NULL)
		return NULL;

	MetaChunk* metaChunk = &area->metaChunks[
		((addr_t)address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE];

	// get the chunk
	if (metaChunk->chunkSize == 0)
		return NULL;
	if ((addr_t)address < metaChunk->chunkBase)
		return NULL;

	uint16 chunkIndex = _ChunkIndexForAddress(metaChunk, (addr_t)address);
	Chunk* chunk = &metaChunk->chunks[chunkIndex];

	addr_t reference = chunk->reference;
	if ((reference & 1) == 0)
		return (ObjectCache*)reference;

	return NULL;
}


/*static*/ status_t
MemoryManager::_AllocateChunks(size_t chunkSize, uint32 chunkCount,
	uint32 flags, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunkList* metaChunkList = NULL;
	if (chunkSize == SLAB_CHUNK_SIZE_SMALL) {
		metaChunkList = &sPartialMetaChunksSmall;
	} else if (chunkSize == SLAB_CHUNK_SIZE_MEDIUM) {
		metaChunkList = &sPartialMetaChunksMedium;
	} else if (chunkSize != SLAB_CHUNK_SIZE_LARGE) {
		panic("MemoryManager::_AllocateChunks(): Unsupported chunk size: %"
			B_PRIuSIZE, chunkSize);
		return B_BAD_VALUE;
	}

	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk))
		return B_OK;

	if (sFreeAreas != NULL) {
		_AddArea(_PopFreeArea());
		_RequestMaintenance();

		_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
		return B_OK;
	}

	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// We can't create an area with this limitation and we must not wait
		// for someone else to do so.
		return B_WOULD_BLOCK;
	}

	// We need to allocate a new area. Wait, if someone else is trying to do
	// the same.
	while (true) {
		AllocationEntry* allocationEntry = NULL;
		if (sAllocationEntryDontWait != NULL) {
			allocationEntry = sAllocationEntryDontWait;
		} else if (sAllocationEntryCanWait != NULL
				&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
			allocationEntry = sAllocationEntryCanWait;
		} else
			break;

		ConditionVariableEntry entry;
		allocationEntry->condition.Add(&entry);

		mutex_unlock(&sLock);
		entry.Wait();
		mutex_lock(&sLock);

		if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk,
				_chunk)) {
			return B_OK;
		}
	}

	// prepare the allocation entry others can wait on
	AllocationEntry*& allocationEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? sAllocationEntryDontWait : sAllocationEntryCanWait;

	AllocationEntry myResizeEntry;
	allocationEntry = &myResizeEntry;
	allocationEntry->condition.Init(metaChunkList, "wait for slab area");
	allocationEntry->thread = find_thread(NULL);

	Area* area;
	status_t error = _AllocateArea(flags, area);

	allocationEntry->condition.NotifyAll();
	allocationEntry = NULL;

	if (error != B_OK)
		return error;

	// Try again to get a meta chunk. Something might have been freed in the
	// meantime. We can free the area in this case.
	if (_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk)) {
		_FreeArea(area, true, flags);
		return B_OK;
	}

	_AddArea(area);
	_GetChunks(metaChunkList, chunkSize, chunkCount, _metaChunk, _chunk);
	return B_OK;
}


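// Area creation in _AllocateChunks() is serialized: the creating thread
// publishes an AllocationEntry (sAllocationEntryDontWait or
// sAllocationEntryCanWait, depending on CACHE_DONT_WAIT_FOR_MEMORY) and
// notifies its condition variable once done, while other threads wait on
// that entry and then retry _GetChunks(), since the newly added area may
// already satisfy their request.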
/*static*/ bool
MemoryManager::_GetChunks(MetaChunkList* metaChunkList, size_t chunkSize,
	uint32 chunkCount, MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	// the common and less complicated special case
	if (chunkCount == 1)
		return _GetChunk(metaChunkList, chunkSize, _metaChunk, _chunk);

	ASSERT(metaChunkList != NULL);

	// Iterate through the partial meta chunk list and try to find a free
	// range that is large enough.
	MetaChunk* metaChunk = NULL;
	for (MetaChunkList::Iterator it = metaChunkList->GetIterator();
			(metaChunk = it.Next()) != NULL;) {
		if (metaChunk->firstFreeChunk + chunkCount - 1
				<= metaChunk->lastFreeChunk) {
			break;
		}
	}

	if (metaChunk == NULL) {
		// try to get a free meta chunk
		if ((SLAB_CHUNK_SIZE_LARGE - SLAB_AREA_STRUCT_OFFSET - kAreaAdminSize)
				/ chunkSize >= chunkCount) {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
		}
		if (metaChunk == NULL)
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();

		if (metaChunk == NULL)
			return false;

		metaChunkList->Add(metaChunk);
		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// pull the chunks out of the free list
	Chunk* firstChunk = metaChunk->chunks + metaChunk->firstFreeChunk;
	Chunk* lastChunk = firstChunk + (chunkCount - 1);
	Chunk** chunkPointer = &metaChunk->freeChunks;
	uint32 remainingChunks = chunkCount;
	while (remainingChunks > 0) {
		ASSERT_PRINT(chunkPointer, "remaining: %" B_PRIu32 "/%" B_PRIu32
			", area: %p, meta chunk: %" B_PRIdSSIZE "\n", remainingChunks,
			chunkCount, metaChunk->GetArea(),
			metaChunk - metaChunk->GetArea()->metaChunks);
		Chunk* chunk = *chunkPointer;
		if (chunk >= firstChunk && chunk <= lastChunk) {
			*chunkPointer = chunk->next;
			chunk->reference = 1;
			remainingChunks--;
		} else
			chunkPointer = &chunk->next;
	}

	// allocate the chunks
	metaChunk->usedChunkCount += chunkCount;
	if (metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	// update the free range
	metaChunk->firstFreeChunk += chunkCount;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	_chunk = firstChunk;
	_metaChunk = metaChunk;

	T(AllocateChunks(chunkSize, chunkCount, metaChunk, firstChunk));

	return true;
}


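// Multi-chunk requests are carved out of the contiguous
// [firstFreeChunk, lastFreeChunk] range of a meta chunk, which is why
// _GetChunks() only accepts meta chunks whose free range is still large
// enough. Single-chunk requests, handled by _GetChunk() below, can simply
// pop any entry off the meta chunk's free list.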
/*static*/ bool
MemoryManager::_GetChunk(MetaChunkList* metaChunkList, size_t chunkSize,
	MetaChunk*& _metaChunk, Chunk*& _chunk)
{
	MetaChunk* metaChunk = metaChunkList != NULL
		? metaChunkList->Head() : NULL;
	if (metaChunk == NULL) {
		// no partial meta chunk -- maybe there's a free one
		if (chunkSize == SLAB_CHUNK_SIZE_LARGE) {
			metaChunk = sFreeCompleteMetaChunks.RemoveHead();
		} else {
			metaChunk = sFreeShortMetaChunks.RemoveHead();
			if (metaChunk == NULL)
				metaChunk = sFreeCompleteMetaChunks.RemoveHead();
			if (metaChunk != NULL)
				metaChunkList->Add(metaChunk);
		}

		if (metaChunk == NULL)
			return false;

		metaChunk->GetArea()->usedMetaChunkCount++;
		_PrepareMetaChunk(metaChunk, chunkSize);

		T(AllocateMetaChunk(metaChunk));
	}

	// allocate the chunk
	if (++metaChunk->usedChunkCount == metaChunk->chunkCount) {
		// meta chunk is full now -- remove it from its list
		if (metaChunkList != NULL)
			metaChunkList->Remove(metaChunk);
	}

	_chunk = _pop(metaChunk->freeChunks);
	_metaChunk = metaChunk;

	_chunk->reference = 1;

	// update the free range
	uint32 chunkIndex = _chunk - metaChunk->chunks;
	if (chunkIndex >= metaChunk->firstFreeChunk
			&& chunkIndex <= metaChunk->lastFreeChunk) {
		if (chunkIndex - metaChunk->firstFreeChunk
				<= metaChunk->lastFreeChunk - chunkIndex) {
			metaChunk->firstFreeChunk = chunkIndex + 1;
		} else
			metaChunk->lastFreeChunk = chunkIndex - 1;
	}

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));

	T(AllocateChunk(chunkSize, metaChunk, _chunk));

	return true;
}


/*static*/ void
MemoryManager::_FreeChunk(Area* area, MetaChunk* metaChunk, Chunk* chunk,
	addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
	// unmap the chunk
	if (!alreadyUnmapped) {
		mutex_unlock(&sLock);
		_UnmapChunk(area->vmArea, chunkAddress, metaChunk->chunkSize, flags);
		mutex_lock(&sLock);
	}

	T(FreeChunk(metaChunk, chunk));

	_push(metaChunk->freeChunks, chunk);

	uint32 chunkIndex = chunk - metaChunk->chunks;

	// free the meta chunk, if it is unused now
	PARANOID_CHECKS_ONLY(bool areaDeleted = false;)
	ASSERT(metaChunk->usedChunkCount > 0);
	if (--metaChunk->usedChunkCount == 0) {
		T(FreeMetaChunk(metaChunk));

		// remove from partial meta chunk list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Remove(metaChunk);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Remove(metaChunk);

		metaChunk->chunkSize = 0;

		// add it to the free meta chunk list
		if (metaChunk == area->metaChunks)
			sFreeShortMetaChunks.Add(metaChunk, false);
		else
			sFreeCompleteMetaChunks.Add(metaChunk, false);

		// free the area, if it is unused now
		ASSERT(area->usedMetaChunkCount > 0);
		if (--area->usedMetaChunkCount == 0) {
			_FreeArea(area, false, flags);
			PARANOID_CHECKS_ONLY(areaDeleted = true;)
		}
	} else if (metaChunk->usedChunkCount == metaChunk->chunkCount - 1) {
		// the meta chunk was full before -- add it back to its partial chunk
		// list
		if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_SMALL)
			sPartialMetaChunksSmall.Add(metaChunk, false);
		else if (metaChunk->chunkSize == SLAB_CHUNK_SIZE_MEDIUM)
			sPartialMetaChunksMedium.Add(metaChunk, false);

		metaChunk->firstFreeChunk = chunkIndex;
		metaChunk->lastFreeChunk = chunkIndex;
	} else {
		// extend the free range, if the chunk adjoins
		if (chunkIndex + 1 == metaChunk->firstFreeChunk) {
			uint32 firstFree = chunkIndex;
			for (; firstFree > 0; firstFree--) {
				Chunk* previousChunk = &metaChunk->chunks[firstFree - 1];
				if (!_IsChunkFree(metaChunk, previousChunk))
					break;
			}
			metaChunk->firstFreeChunk = firstFree;
		} else if (chunkIndex == (uint32)metaChunk->lastFreeChunk + 1) {
			uint32 lastFree = chunkIndex;
			for (; lastFree + 1 < metaChunk->chunkCount; lastFree++) {
				Chunk* nextChunk = &metaChunk->chunks[lastFree + 1];
				if (!_IsChunkFree(metaChunk, nextChunk))
					break;
			}
			metaChunk->lastFreeChunk = lastFree;
		}
	}

	PARANOID_CHECKS_ONLY(
		if (!areaDeleted)
			_CheckMetaChunk(metaChunk);
	)
}


/*static*/ void
MemoryManager::_PrepareMetaChunk(MetaChunk* metaChunk, size_t chunkSize)
{
	Area* area = metaChunk->GetArea();

	if (metaChunk == area->metaChunks) {
		// the first chunk is shorter
		size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
			chunkSize);
		metaChunk->chunkBase = area->BaseAddress() + unusableSize;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE - unusableSize;
	}

	metaChunk->chunkSize = chunkSize;
	metaChunk->chunkCount = metaChunk->totalSize / chunkSize;
	metaChunk->usedChunkCount = 0;

	metaChunk->freeChunks = NULL;
	for (int32 i = metaChunk->chunkCount - 1; i >= 0; i--)
		_push(metaChunk->freeChunks, metaChunk->chunks + i);

	metaChunk->firstFreeChunk = 0;
	metaChunk->lastFreeChunk = metaChunk->chunkCount - 1;

	PARANOID_CHECKS_ONLY(_CheckMetaChunk(metaChunk));
}


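// Note that the first meta chunk of every area is shorter than the others:
// the Area structure and its administrative data (SLAB_AREA_STRUCT_OFFSET
// + kAreaAdminSize bytes, rounded up to the chunk size) occupy the start of
// the area, so _PrepareMetaChunk() above moves chunkBase past them and
// shrinks totalSize accordingly.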
/*static*/ void
MemoryManager::_AddArea(Area* area)
{
	T(AddArea(area));

	// add the area to the hash table
	WriteLocker writeLocker(sAreaTableLock);
	sAreaTable.InsertUnchecked(area);
	writeLocker.Unlock();

	// add the area's meta chunks to the free lists
	sFreeShortMetaChunks.Add(&area->metaChunks[0]);
	for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++)
		sFreeCompleteMetaChunks.Add(&area->metaChunks[i]);
}


/*static*/ status_t
MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
{
	TRACE("MemoryManager::_AllocateArea(%#" B_PRIx32 ")\n", flags);

	ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

	mutex_unlock(&sLock);

	size_t pagesNeededToMap = 0;
	void* areaBase;
	Area* area;
	VMArea* vmArea = NULL;

	if (sKernelArgs == NULL) {
		// create an area
		uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
			? CREATE_AREA_PRIORITY_VIP : 0;
		area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
			&areaBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
			areaCreationFlags);
		if (areaID < 0) {
			mutex_lock(&sLock);
			return areaID;
		}

		area = _AreaForAddress((addr_t)areaBase);

		// map the memory for the administrative structure
		VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
		VMTranslationMap* translationMap = addressSpace->TranslationMap();

		pagesNeededToMap = translationMap->MaxPagesNeededToMap(
			(addr_t)area, (addr_t)areaBase + SLAB_AREA_SIZE - 1);

		vmArea = VMAreaHash::Lookup(areaID);
		status_t error = _MapChunk(vmArea, (addr_t)area, kAreaAdminSize,
			pagesNeededToMap, flags);
		if (error != B_OK) {
			delete_area(areaID);
			mutex_lock(&sLock);
			return error;
		}

		dprintf("slab memory manager: created area %p (%" B_PRId32 ")\n", area,
			areaID);
	} else {
		// no areas yet -- allocate raw memory
		areaBase = (void*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
			SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			SLAB_AREA_SIZE);
		if (areaBase == NULL) {
			mutex_lock(&sLock);
			return B_NO_MEMORY;
		}
		area = _AreaForAddress((addr_t)areaBase);

		TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
			area);
	}

	// init the area structure
	area->vmArea = vmArea;
	area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
	area->usedMetaChunkCount = 0;
	area->fullyMapped = vmArea == NULL;

	// init the meta chunks
	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		metaChunk->chunkSize = 0;
		metaChunk->chunkBase = (addr_t)areaBase + i * SLAB_CHUNK_SIZE_LARGE;
		metaChunk->totalSize = SLAB_CHUNK_SIZE_LARGE;
			// Note: chunkBase and totalSize aren't correct for the first
			// meta chunk. They will be set in _PrepareMetaChunk().
		metaChunk->chunkCount = 0;
		metaChunk->usedChunkCount = 0;
		metaChunk->freeChunks = NULL;
	}

	mutex_lock(&sLock);
	_area = area;

	T(AllocateArea(area, flags));

	return B_OK;
}


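// _AllocateArea() has two paths: once the VM is up, a null area is created
// and only the administrative header is mapped eagerly (chunks are mapped
// on demand by _MapChunk()); during early boot, raw memory comes from
// vm_allocate_early() and the area is marked fullyMapped until
// _UnmapFreeChunksEarly() trims the unused parts in InitPostArea().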
/*static*/ void
MemoryManager::_FreeArea(Area* area, bool areaRemoved, uint32 flags)
{
	TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);

	T(FreeArea(area, areaRemoved, flags));

	ASSERT(area->usedMetaChunkCount == 0);

	if (!areaRemoved) {
		// remove the area's meta chunks from the free lists
		ASSERT(area->metaChunks[0].usedChunkCount == 0);
		sFreeShortMetaChunks.Remove(&area->metaChunks[0]);

		for (int32 i = 1; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			ASSERT(area->metaChunks[i].usedChunkCount == 0);
			sFreeCompleteMetaChunks.Remove(&area->metaChunks[i]);
		}

		// remove the area from the hash table
		WriteLocker writeLocker(sAreaTableLock);
		sAreaTable.RemoveUnchecked(area);
		writeLocker.Unlock();
	}

	// We want to keep one or two free areas as a reserve.
	if (sFreeAreaCount <= 1) {
		_PushFreeArea(area);
		return;
	}

	if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		// This is either early in the boot process or we aren't allowed to
		// delete the area now.
		_PushFreeArea(area);
		_RequestMaintenance();
		return;
	}

	mutex_unlock(&sLock);

	dprintf("slab memory manager: deleting area %p (%" B_PRId32 ")\n", area,
		area->vmArea->id);

	size_t memoryToUnreserve = area->reserved_memory_for_mapping;
	delete_area(area->vmArea->id);
	vm_unreserve_memory(memoryToUnreserve);

	mutex_lock(&sLock);
}


/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
	size_t reserveAdditionalMemory, uint32 flags)
{
	TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	T(Map(address, size, flags));

	if (vmArea == NULL) {
		// everything is mapped anyway
		return B_OK;
	}

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();

	// reserve memory for the chunk
	int priority = (flags & CACHE_PRIORITY_VIP) != 0
		? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
	size_t reservedMemory = size + reserveAdditionalMemory;
	status_t error = vm_try_reserve_memory(size, priority,
		(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
	if (error != B_OK)
		return error;

	// reserve the pages we need now
	size_t reservedPages = size / B_PAGE_SIZE
		+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
	vm_page_reservation reservation;
	if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
		if (!vm_page_try_reserve_pages(&reservation, reservedPages, priority)) {
			vm_unreserve_memory(reservedMemory);
			return B_WOULD_BLOCK;
		}
	} else
		vm_page_reserve_pages(&reservation, reservedPages, priority);

	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// map the pages
	translationMap->Lock();

	addr_t areaOffset = address - vmArea->Base();
	addr_t endAreaOffset = areaOffset + size;
	for (size_t offset = areaOffset; offset < endAreaOffset;
			offset += B_PAGE_SIZE) {
		vm_page* page = vm_page_allocate_page(&reservation, PAGE_STATE_WIRED);
		cache->InsertPage(page, offset);

		page->IncrementWiredCount();
		atomic_add(&gMappedPagesCount, 1);
		DEBUG_PAGE_ACCESS_END(page);

		translationMap->Map(vmArea->Base() + offset,
			page->physical_page_number * B_PAGE_SIZE,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			vmArea->MemoryType(), &reservation);
	}

	translationMap->Unlock();

	cache->ReleaseRefAndUnlock();

	vm_page_unreserve_pages(&reservation);

	return B_OK;
}


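// _MapChunk() reserves both memory (vm_try_reserve_memory()) and the pages
// needed for the mapping; _UnmapChunk() below returns the chunk's memory
// reservation via vm_unreserve_memory() after the pages have been unmapped
// and freed.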
/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
	uint32 flags)
{
	T(Unmap(address, size, flags));

	if (vmArea == NULL)
		return B_ERROR;

	TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
		")\n", vmArea, address, size);

	VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
	VMTranslationMap* translationMap = addressSpace->TranslationMap();
	VMCache* cache = vm_area_get_locked_cache(vmArea);

	// unmap the pages
	translationMap->Lock();
	translationMap->Unmap(address, address + size - 1);
	atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
	translationMap->Unlock();

	// free the pages
	addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
	addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
	VMCachePagesTree::Iterator it = cache->pages.GetIterator(
		areaPageOffset, true, true);
	while (vm_page* page = it.Next()) {
		if (page->cache_offset >= areaPageEndOffset)
			break;

		DEBUG_PAGE_ACCESS_START(page);

		page->DecrementWiredCount();

		cache->RemovePage(page);
			// the iterator is remove-safe
		vm_page_free(cache, page);
	}

	cache->ReleaseRefAndUnlock();

	vm_unreserve_memory(size);

	return B_OK;
}


/*static*/ void
MemoryManager::_UnmapFreeChunksEarly(Area* area)
{
	if (!area->fullyMapped)
		return;

	TRACE("MemoryManager::_UnmapFreeChunksEarly(%p)\n", area);

	// unmap the space before the Area structure
#if SLAB_AREA_STRUCT_OFFSET > 0
	_UnmapChunk(area->vmArea, area->BaseAddress(), SLAB_AREA_STRUCT_OFFSET,
		0);
#endif

	for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
		MetaChunk* metaChunk = area->metaChunks + i;
		if (metaChunk->chunkSize == 0) {
			// meta chunk is free -- unmap it completely
			if (i == 0) {
				_UnmapChunk(area->vmArea, (addr_t)area + kAreaAdminSize,
					SLAB_CHUNK_SIZE_LARGE - kAreaAdminSize, 0);
			} else {
				_UnmapChunk(area->vmArea,
					area->BaseAddress() + i * SLAB_CHUNK_SIZE_LARGE,
					SLAB_CHUNK_SIZE_LARGE, 0);
			}
		} else {
			// unmap free chunks
			for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
					chunk = chunk->next) {
				_UnmapChunk(area->vmArea, _ChunkAddress(metaChunk, chunk),
					metaChunk->chunkSize, 0);
			}

			// The first meta chunk might have space before its first chunk.
			if (i == 0) {
				addr_t unusedStart = (addr_t)area + kAreaAdminSize;
				if (unusedStart < metaChunk->chunkBase) {
					_UnmapChunk(area->vmArea, unusedStart,
						metaChunk->chunkBase - unusedStart, 0);
				}
			}
		}
	}

	area->fullyMapped = false;
}


/*static*/ void
MemoryManager::_ConvertEarlyArea(Area* area)
{
	void* address = (void*)area->BaseAddress();
	area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
		SLAB_AREA_SIZE, B_ALREADY_WIRED,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (areaID < 0)
		panic("out of memory");

	area->vmArea = VMAreaHash::Lookup(areaID);
}


/*static*/ void
MemoryManager::_RequestMaintenance()
{
	if ((sFreeAreaCount > 0 && sFreeAreaCount <= 2) || sMaintenanceNeeded)
		return;

	sMaintenanceNeeded = true;
	request_memory_manager_maintenance();
}


/*static*/ bool
MemoryManager::_IsChunkInFreeList(const MetaChunk* metaChunk,
	const Chunk* chunk)
{
	Chunk* freeChunk = metaChunk->freeChunks;
	while (freeChunk != NULL) {
		if (freeChunk == chunk)
			return true;
		freeChunk = freeChunk->next;
	}

	return false;
}


#if DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS

/*static*/ void
MemoryManager::_CheckMetaChunk(MetaChunk* metaChunk)
{
	Area* area = metaChunk->GetArea();
	int32 metaChunkIndex = metaChunk - area->metaChunks;
	if (metaChunkIndex < 0 || metaChunkIndex >= SLAB_META_CHUNKS_PER_AREA) {
		panic("invalid meta chunk %p!", metaChunk);
		return;
	}

	switch (metaChunk->chunkSize) {
		case 0:
			// unused
			return;
		case SLAB_CHUNK_SIZE_SMALL:
		case SLAB_CHUNK_SIZE_MEDIUM:
		case SLAB_CHUNK_SIZE_LARGE:
			break;
		default:
			panic("meta chunk %p has invalid chunk size: %" B_PRIuSIZE,
				metaChunk, metaChunk->chunkSize);
			return;
	}

	if (metaChunk->totalSize > SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid total size: %" B_PRIuSIZE,
			metaChunk, metaChunk->totalSize);
		return;
	}

	addr_t expectedBase = area->BaseAddress()
		+ metaChunkIndex * SLAB_CHUNK_SIZE_LARGE;
	if (metaChunk->chunkBase < expectedBase
		|| metaChunk->chunkBase - expectedBase + metaChunk->totalSize
			> SLAB_CHUNK_SIZE_LARGE) {
		panic("meta chunk %p has invalid base address: %" B_PRIxADDR, metaChunk,
			metaChunk->chunkBase);
		return;
	}

	if (metaChunk->chunkCount != metaChunk->totalSize / metaChunk->chunkSize) {
		panic("meta chunk %p has invalid chunk count: %u", metaChunk,
			metaChunk->chunkCount);
		return;
	}

	if (metaChunk->usedChunkCount > metaChunk->chunkCount) {
		panic("meta chunk %p has invalid unused chunk count: %u", metaChunk,
			metaChunk->usedChunkCount);
		return;
	}

	if (metaChunk->firstFreeChunk > metaChunk->chunkCount) {
		panic("meta chunk %p has invalid first free chunk: %u", metaChunk,
			metaChunk->firstFreeChunk);
		return;
	}

	if (metaChunk->lastFreeChunk >= metaChunk->chunkCount) {
		panic("meta chunk %p has invalid last free chunk: %u", metaChunk,
			metaChunk->lastFreeChunk);
		return;
	}

	// check free list for structural sanity
	uint32 freeChunks = 0;
	for (Chunk* chunk = metaChunk->freeChunks; chunk != NULL;
			chunk = chunk->next) {
		if ((addr_t)chunk % sizeof(Chunk) != 0 || chunk < metaChunk->chunks
			|| chunk >= metaChunk->chunks + metaChunk->chunkCount) {
			panic("meta chunk %p has invalid element in free list, chunk: %p",
				metaChunk, chunk);
			return;
		}

		if (++freeChunks > metaChunk->chunkCount) {
			panic("meta chunk %p has cyclic free list", metaChunk);
			return;
		}
	}

	if (freeChunks + metaChunk->usedChunkCount > metaChunk->chunkCount) {
		panic("meta chunk %p has mismatching free/used chunk counts: total: "
			"%u, used: %u, free: %" B_PRIu32, metaChunk, metaChunk->chunkCount,
			metaChunk->usedChunkCount, freeChunks);
		return;
	}

	// count used chunks by looking at their reference/next field
	uint32 usedChunks = 0;
	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i))
			usedChunks++;
	}

	if (usedChunks != metaChunk->usedChunkCount) {
		panic("meta chunk %p has used chunks that appear free: total: "
			"%u, used: %u, appearing used: %" B_PRIu32, metaChunk,
			metaChunk->chunkCount, metaChunk->usedChunkCount, usedChunks);
		return;
	}

	// check that the free range really is free
	for (uint32 i = metaChunk->firstFreeChunk; i < metaChunk->lastFreeChunk;
			i++) {
		if (!_IsChunkFree(metaChunk, metaChunk->chunks + i)) {
			panic("meta chunk %p has used chunk in free range, chunk: %p (%"
				B_PRIu32 ", free range: %u - %u)", metaChunk,
				metaChunk->chunks + i, i, metaChunk->firstFreeChunk,
				metaChunk->lastFreeChunk);
			return;
		}
	}
}

#endif // DEBUG_SLAB_MEMORY_MANAGER_PARANOID_CHECKS


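// The functions below back the KDL commands registered in InitPostArea().
// Typical invocations from the kernel debugger:
//   slab_areas                   - list all slab areas
//   slab_area -c <address>       - dump one area including its chunks
//   slab_meta_chunk <address>    - dump the meta chunk containing <address>
//   slab_raw_allocations         - list raw (non-ObjectCache) allocations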
/*static*/ int
MemoryManager::_DumpRawAllocations(int argc, char** argv)
{
	kprintf("%-*s meta chunk chunk %-*s size (KB)\n",
		B_PRINTF_POINTER_WIDTH, "area", B_PRINTF_POINTER_WIDTH, "base");

	size_t totalSize = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;
			for (uint32 k = 0; k < metaChunk->chunkCount; k++) {
				Chunk* chunk = metaChunk->chunks + k;

				// skip free chunks and cache-backed chunks
				if (_IsChunkFree(metaChunk, chunk))
					continue;

				addr_t reference = chunk->reference;
				if ((reference & 1) == 0 || reference == 1)
					continue;

				addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
				size_t size = reference - chunkAddress + 1;
				totalSize += size;

				kprintf("%p %10" B_PRId32 " %5" B_PRIu32 " %p %9"
					B_PRIuSIZE "\n", area, i, k, (void*)chunkAddress,
					size / 1024);
			}
		}
	}

	kprintf("total:%*s%9" B_PRIuSIZE "\n", (2 * B_PRINTF_POINTER_WIDTH) + 21,
		"", totalSize / 1024);

	return 0;
}


/*static*/ void
MemoryManager::_PrintMetaChunkTableHeader(bool printChunks)
{
	if (printChunks)
		kprintf("chunk base cache object size cache name\n");
	else
		kprintf("chunk base\n");
}


/*static*/ void
MemoryManager::_DumpMetaChunk(MetaChunk* metaChunk, bool printChunks,
	bool printHeader)
{
	if (printHeader)
		_PrintMetaChunkTableHeader(printChunks);

	const char* type = "empty";
	if (metaChunk->chunkSize != 0) {
		switch (metaChunk->chunkSize) {
			case SLAB_CHUNK_SIZE_SMALL:
				type = "small";
				break;
			case SLAB_CHUNK_SIZE_MEDIUM:
				type = "medium";
				break;
			case SLAB_CHUNK_SIZE_LARGE:
				type = "large";
				break;
		}
	}

	int metaChunkIndex = metaChunk - metaChunk->GetArea()->metaChunks;
	kprintf("%5d %p --- %6s meta chunk", metaChunkIndex,
		(void*)metaChunk->chunkBase, type);
	if (metaChunk->chunkSize != 0) {
		kprintf(": %4u/%4u used, %-4u-%4u free ------------\n",
			metaChunk->usedChunkCount, metaChunk->chunkCount,
			metaChunk->firstFreeChunk, metaChunk->lastFreeChunk);
	} else
		kprintf(" --------------------------------------------\n");

	if (metaChunk->chunkSize == 0 || !printChunks)
		return;

	for (uint32 i = 0; i < metaChunk->chunkCount; i++) {
		Chunk* chunk = metaChunk->chunks + i;

		// skip free chunks
		if (_IsChunkFree(metaChunk, chunk)) {
			if (!_IsChunkInFreeList(metaChunk, chunk)) {
				kprintf("%5" B_PRIu32 " %p appears free, but isn't in free "
					"list!\n", i, (void*)_ChunkAddress(metaChunk, chunk));
			}

			continue;
		}

		addr_t reference = chunk->reference;
		if ((reference & 1) == 0) {
			ObjectCache* cache = (ObjectCache*)reference;
			kprintf("%5" B_PRIu32 " %p %p %11" B_PRIuSIZE " %s\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), cache,
				cache != NULL ? cache->object_size : 0,
				cache != NULL ? cache->name : "");
		} else if (reference != 1) {
			kprintf("%5" B_PRIu32 " %p raw allocation up to %p\n", i,
				(void*)_ChunkAddress(metaChunk, chunk), (void*)reference);
		}
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunk(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[1], &address, false))
		return 0;

	Area* area = _AreaForAddress(address);

	MetaChunk* metaChunk;
	if ((addr_t)address >= (addr_t)area->metaChunks
		&& (addr_t)address
			< (addr_t)(area->metaChunks + SLAB_META_CHUNKS_PER_AREA)) {
		metaChunk = (MetaChunk*)(addr_t)address;
	} else {
		metaChunk = area->metaChunks
			+ (address % SLAB_AREA_SIZE) / SLAB_CHUNK_SIZE_LARGE;
	}

	_DumpMetaChunk(metaChunk, true, true);

	return 0;
}


/*static*/ void
MemoryManager::_DumpMetaChunks(const char* name, MetaChunkList& metaChunkList,
	bool printChunks)
{
	kprintf("%s:\n", name);

	for (MetaChunkList::Iterator it = metaChunkList.GetIterator();
			MetaChunk* metaChunk = it.Next();) {
		_DumpMetaChunk(metaChunk, printChunks, false);
	}
}


/*static*/ int
MemoryManager::_DumpMetaChunks(int argc, char** argv)
{
	bool printChunks = argc > 1 && strcmp(argv[1], "-c") == 0;

	_PrintMetaChunkTableHeader(printChunks);
	_DumpMetaChunks("free complete", sFreeCompleteMetaChunks, printChunks);
	_DumpMetaChunks("free short", sFreeShortMetaChunks, printChunks);
	_DumpMetaChunks("partial small", sPartialMetaChunksSmall, printChunks);
	_DumpMetaChunks("partial medium", sPartialMetaChunksMedium, printChunks);

	return 0;
}


/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
	bool printChunks = false;

	int argi = 1;
	while (argi < argc) {
		if (argv[argi][0] != '-')
			break;
		const char* arg = argv[argi++];
		if (strcmp(arg, "-c") == 0) {
			printChunks = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	if (argi + 1 != argc) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	uint64 address;
	if (!evaluate_debug_expression(argv[argi], &address, false))
		return 0;

	Area* area = _AreaForAddress((addr_t)address);

	for (uint32 k = 0; k < SLAB_META_CHUNKS_PER_AREA; k++) {
		MetaChunk* metaChunk = area->metaChunks + k;
		_DumpMetaChunk(metaChunk, printChunks, k == 0);
	}

	return 0;
}


/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
	kprintf(" %*s %*s meta small medium large\n",
		B_PRINTF_POINTER_WIDTH, "base", B_PRINTF_POINTER_WIDTH, "area");

	size_t totalTotalSmall = 0;
	size_t totalUsedSmall = 0;
	size_t totalTotalMedium = 0;
	size_t totalUsedMedium = 0;
	size_t totalUsedLarge = 0;
	uint32 areaCount = 0;

	for (AreaTable::Iterator it = sAreaTable.GetIterator();
			Area* area = it.Next();) {
		areaCount++;

		// sum up the free/used counts for the chunk sizes
		int totalSmall = 0;
		int usedSmall = 0;
		int totalMedium = 0;
		int usedMedium = 0;
		int usedLarge = 0;

		for (int32 i = 0; i < SLAB_META_CHUNKS_PER_AREA; i++) {
			MetaChunk* metaChunk = area->metaChunks + i;
			if (metaChunk->chunkSize == 0)
				continue;

			switch (metaChunk->chunkSize) {
				case SLAB_CHUNK_SIZE_SMALL:
					totalSmall += metaChunk->chunkCount;
					usedSmall += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_MEDIUM:
					totalMedium += metaChunk->chunkCount;
					usedMedium += metaChunk->usedChunkCount;
					break;
				case SLAB_CHUNK_SIZE_LARGE:
					usedLarge += metaChunk->usedChunkCount;
					break;
			}
		}

		kprintf("%p %p %2u/%2u %4d/%4d %3d/%3d %5d\n",
			area, area->vmArea, area->usedMetaChunkCount,
			SLAB_META_CHUNKS_PER_AREA, usedSmall, totalSmall, usedMedium,
			totalMedium, usedLarge);

		totalTotalSmall += totalSmall;
		totalUsedSmall += usedSmall;
		totalTotalMedium += totalMedium;
		totalUsedMedium += usedMedium;
		totalUsedLarge += usedLarge;
	}

	kprintf("%d free area%s:\n", sFreeAreaCount,
		sFreeAreaCount == 1 ? "" : "s");
	for (Area* area = sFreeAreas; area != NULL; area = area->next) {
		areaCount++;
		kprintf("%p %p\n", area, area->vmArea);
	}

	kprintf("total usage:\n");
	kprintf(" small: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedSmall,
		totalTotalSmall);
	kprintf(" medium: %" B_PRIuSIZE "/%" B_PRIuSIZE "\n", totalUsedMedium,
		totalTotalMedium);
	kprintf(" large: %" B_PRIuSIZE "\n", totalUsedLarge);
	kprintf(" memory: %" B_PRIuSIZE "/%" B_PRIu32 " KB\n",
		(totalUsedSmall * SLAB_CHUNK_SIZE_SMALL
			+ totalUsedMedium * SLAB_CHUNK_SIZE_MEDIUM
			+ totalUsedLarge * SLAB_CHUNK_SIZE_LARGE) / 1024,
		areaCount * SLAB_AREA_SIZE / 1024);
	kprintf(" overhead: %" B_PRIuSIZE " KB\n",
		areaCount * kAreaAdminSize / 1024);

	return 0;
}


#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

/*static*/ void
MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
	AbstractTraceEntryWithStackTrace* traceEntry)
{
	_TrackingInfoFor(allocation, size)->Init(traceEntry);
}

#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabMemoryManager)