/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#include <slab/Slab.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"
#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");

static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;
#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t	caller;
	size_t	count;
	size_t	size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);
RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)
static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

static const uint32 kSlabCodeAddressRangeCount
	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
RANGE_MARKER_FUNCTION_BEGIN(Slab)
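// Object cache tracing: when SLAB_OBJECT_CACHE_TRACING is enabled, the entry
// classes below record one trace entry per cache operation (create, delete,
// alloc, free, reserve); otherwise the T() macro expands to nothing.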
#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
	public:
		ObjectCacheTraceEntry(ObjectCache* cache)
			:
			TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
			fCache(cache)
		{
		}

	protected:
		ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
	public:
		Create(const char* name, size_t objectSize, size_t alignment,
				size_t maxByteUsage, uint32 flags, void* cookie,
				ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache),
			fObjectSize(objectSize),
			fAlignment(alignment),
			fMaxByteUsage(maxByteUsage),
			fFlags(flags),
			fCookie(cookie)
		{
			fName = alloc_tracing_buffer_strcpy(name, 64, false);
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache create: name: \"%s\", object size: %lu, "
				"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
				"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
				fFlags, fCookie, fCache);
		}

	private:
		const char*	fName;
		size_t		fObjectSize;
		size_t		fAlignment;
		size_t		fMaxByteUsage;
		uint32		fFlags;
		void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
	public:
		Delete(ObjectCache* cache)
			:
			ObjectCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache delete: %p", fCache);
		}
};


class Alloc : public ObjectCacheTraceEntry {
	public:
		Alloc(ObjectCache* cache, uint32 flags, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fFlags(flags),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
				"object: %p", fCache, fFlags, fObject);
		}

	private:
		uint32	fFlags;
		void*	fObject;
};


class Free : public ObjectCacheTraceEntry {
	public:
		Free(ObjectCache* cache, void* object)
			:
			ObjectCacheTraceEntry(cache),
			fObject(object)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache free: cache: %p, object: %p", fCache,
				fObject);
		}

	private:
		void*	fObject;
};


class Reserve : public ObjectCacheTraceEntry {
	public:
		Reserve(ObjectCache* cache, size_t count, uint32 flags)
			:
			ObjectCacheTraceEntry(cache),
			fCount(count),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("object cache reserve: cache: %p, count: %lu, "
				"flags: 0x%lx", fCache, fCount, fFlags);
		}

	private:
		uint32	fCount;
		uint32	fFlags;
};

}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING
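// The helpers below back the "slabs" and "slab_cache" KDL commands that are
// registered in slab_init_post_area().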
static void
dump_slab(::slab* slab)
{
	kprintf("  %p  %p  %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE
		"  %p\n", slab, slab->pages, slab->size, slab->count, slab->offset,
		slab->free);
}
static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}
static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache [address]\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name: %s\n", cache->name);
	kprintf("lock: %p\n", &cache->lock);
	kprintf("object_size: %lu\n", cache->object_size);
	kprintf("alignment: %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects: %lu\n", cache->total_objects);
	kprintf("used_count: %lu\n", cache->used_count);
	kprintf("empty_count: %lu\n", cache->empty_count);
	kprintf("pressure: %lu\n", cache->pressure);
	kprintf("slab_size: %lu\n", cache->slab_size);
	kprintf("usage: %lu\n", cache->usage);
	kprintf("maximum: %lu\n", cache->maximum);
	kprintf("flags: 0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie: %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait);

	kprintf(" %-*s %-*s      size   used offset  free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}
// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
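// The callbacks below walk the per-object allocation tracking info of the
// slabs and either sum the allocations up per caller (collector), print them
// with optional filters (info printer), or print the details for one specific
// caller (detail printer).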
#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};
class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};
class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace
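// Looks up (or creates) the per-caller summary entry for the given caller
// address in sCallerInfoTable; returns NULL once the fixed-size table is full.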
static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}
static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->size - a->size);
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->count - a->count);
}
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
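// "allocation_infos" KDL command: dumps the tracked allocations, optionally
// filtered by object cache, slab, allocation address, team and/or thread.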
static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)slabAddress;
			if (strcmp(argv[i], "-s") == 0) {
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}
static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the caller table
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
		sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf("     count        size      caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE " %10" B_PRIuSIZE " %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
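// Records a tracing entry for an allocation. With allocation tracking enabled
// the entry is additionally attached to the object's tracking info, so the
// debugger commands above can later attribute the allocation to its caller.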
static void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}
void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}
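// Destroys the cache's depot and returns its remaining empty slabs. All
// objects must already have been freed; full or partial slabs cause a panic.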
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}
static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}
/*!	Makes sure that \a objectCount objects can be allocated.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}
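// Low resource handler: cycles once through the cache list (using the first
// cache as end marker), gives each cache's reclaimer a chance to return
// objects, empties the depots and returns empty slabs according to the
// cache's pressure, while respecting its minimum object reserve.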
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 1;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}
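// Kernel thread ("object cache resizer") that performs memory manager
// maintenance while idle and processes the resize/delete requests queued on
// sMaintenanceQueue.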
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			cacheLocker.Unlock();
			locker.Lock();
		}
	}

	// never can get here
	return B_OK;
}
// #pragma mark - public API
object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}
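// A minimal usage sketch of the public API (illustrative only; "my_request"
// is a hypothetical client type, error handling omitted):
//
//	object_cache* sRequestCache = create_object_cache("my requests",
//		sizeof(my_request), 0, NULL, NULL, NULL);
//	my_request* request = (my_request*)object_cache_alloc(sRequestCache, 0);
//	// ... use the request ...
//	object_cache_free(sRequestCache, request, 0);
//	delete_object_cache(sRequestCache);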
object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}
void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}
status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}
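// Allocation fast path: try the cache's depot first (unless CACHE_NO_DEPOT);
// only on a miss take the cache lock and carve an object out of a partial or
// empty slab, creating a new slab if necessary.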
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object != NULL) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}
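// Free path: after the optional double-free check (PARANOID_KERNEL_FREE) the
// object is either returned to the depot or, for CACHE_NO_DEPOT caches, put
// back onto its slab under the cache lock.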
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}
status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}
void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}
void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}
void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");

#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count and\n"
		"size) printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations filtered by optional values will be printed.\n"
		"If given, <object cache> specifies the address of the object cache\n"
		"or <slab> specifies the address of a slab, for which to print the\n"
		"allocations. Alternatively <address> specifies any address within\n"
		"a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation,\n"
		"with \"--team\" and \"--thread\" allocations by specific teams\n"
		"and/or threads can be filtered (these only work if a corresponding\n"
		"tracing entry is still available).\n"
		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
		"callers are printed, where available\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}
void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}
void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}
RANGE_MARKER_FUNCTION_END(Slab)


#endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE