/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include <slab/Slab.h>

#include <algorithm>
#include <new>
#include <stdlib.h>
#include <string.h>

#include <KernelExport.h>

#include <condition_variable.h>
#include <elf.h>
#include <kernel.h>
#include <low_resource_manager.h>
#include <slab/ObjectDepot.h>
#include <smp.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_debug.h"
#include "slab_private.h"
#include "SmallObjectCache.h"

#if !USE_GUARDED_HEAP_FOR_OBJECT_CACHE


typedef DoublyLinkedList<ObjectCache> ObjectCacheList;

typedef DoublyLinkedList<ObjectCache,
	DoublyLinkedListMemberGetLink<ObjectCache, &ObjectCache::maintenance_link> >
		MaintenanceQueue;

static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
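
// Caches that need to grow to their minimum object reserve, or that are to
// be deleted, are queued here and handled by the "object cache resizer"
// thread (object_cache_maintainer()), which sleeps on sMaintenanceCondition.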
static mutex sMaintenanceLock
	= MUTEX_INITIALIZER("object cache resize requests");
static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;

#if SLAB_ALLOCATION_TRACKING_AVAILABLE

struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;

static caller_info* get_caller_info(addr_t caller);


RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)


static const addr_t kSlabCodeAddressRanges[] = {
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
	RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
};

static const uint32 kSlabCodeAddressRangeCount
	= sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE

RANGE_MARKER_FUNCTION_BEGIN(Slab)


#if SLAB_OBJECT_CACHE_TRACING


namespace SlabObjectCacheTracing {

class ObjectCacheTraceEntry
	: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
public:
	ObjectCacheTraceEntry(ObjectCache* cache)
		:
		TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
		fCache(cache)
	{
	}

protected:
	ObjectCache*	fCache;
};


class Create : public ObjectCacheTraceEntry {
public:
	Create(const char* name, size_t objectSize, size_t alignment,
			size_t maxByteUsage, uint32 flags, void* cookie,
			ObjectCache* cache)
		:
		ObjectCacheTraceEntry(cache),
		fObjectSize(objectSize),
		fAlignment(alignment),
		fMaxByteUsage(maxByteUsage),
		fFlags(flags),
		fCookie(cookie)
	{
		fName = alloc_tracing_buffer_strcpy(name, 64, false);
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache create: name: \"%s\", object size: %lu, "
			"alignment: %lu, max usage: %lu, flags: 0x%lx, cookie: %p "
			"-> cache: %p", fName, fObjectSize, fAlignment, fMaxByteUsage,
			fFlags, fCookie, fCache);
	}

private:
	const char*	fName;
	size_t		fObjectSize;
	size_t		fAlignment;
	size_t		fMaxByteUsage;
	uint32		fFlags;
	void*		fCookie;
};


class Delete : public ObjectCacheTraceEntry {
public:
	Delete(ObjectCache* cache)
		:
		ObjectCacheTraceEntry(cache)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache delete: %p", fCache);
	}
};


class Alloc : public ObjectCacheTraceEntry {
public:
	Alloc(ObjectCache* cache, uint32 flags, void* object)
		:
		ObjectCacheTraceEntry(cache),
		fFlags(flags),
		fObject(object)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache alloc: cache: %p, flags: 0x%lx -> "
			"object: %p", fCache, fFlags, fObject);
	}

private:
	uint32		fFlags;
	void*		fObject;
};


class Free : public ObjectCacheTraceEntry {
public:
	Free(ObjectCache* cache, void* object)
		:
		ObjectCacheTraceEntry(cache),
		fObject(object)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache free: cache: %p, object: %p", fCache,
			fObject);
	}

private:
	void*		fObject;
};


class Reserve : public ObjectCacheTraceEntry {
public:
	Reserve(ObjectCache* cache, size_t count, uint32 flags)
		:
		ObjectCacheTraceEntry(cache),
		fCount(count),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("object cache reserve: cache: %p, count: %lu, "
			"flags: 0x%lx", fCache, fCount, fFlags);
	}

private:
	uint32		fCount;
	uint32		fFlags;
};


}	// namespace SlabObjectCacheTracing

#	define T(x)	new(std::nothrow) SlabObjectCacheTracing::x

#else
#	define T(x)
#endif	// SLAB_OBJECT_CACHE_TRACING
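
// T(Foo(...)) records a SlabObjectCacheTracing::Foo entry for the respective
// operation when SLAB_OBJECT_CACHE_TRACING is enabled; otherwise it expands
// to nothing and the tracing classes above are compiled out.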

// #pragma mark -


static void
dump_slab(::slab* slab)
{
	kprintf(" %p %p %6" B_PRIuSIZE " %6" B_PRIuSIZE " %6" B_PRIuSIZE " %p\n",
		slab, slab->pages, slab->size, slab->count, slab->offset, slab->free);
}


static int
dump_slabs(int argc, char* argv[])
{
	kprintf("%*s %22s %8s %8s %8s %6s %8s %8s %8s\n",
		B_PRINTF_POINTER_WIDTH + 2, "address", "name", "objsize", "align",
		"usage", "empty", "usedobj", "total", "flags");

	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();

	while (it.HasNext()) {
		ObjectCache* cache = it.Next();

		kprintf("%p %22s %8lu %8" B_PRIuSIZE " %8lu %6lu %8lu %8lu %8" B_PRIx32
			"\n", cache, cache->name, cache->object_size, cache->alignment,
			cache->usage, cache->empty_count, cache->used_count,
			cache->total_objects, cache->flags);
	}

	return 0;
}


static int
dump_cache_info(int argc, char* argv[])
{
	if (argc < 2) {
		kprintf("usage: slab_cache [address]\n");
		return 0;
	}

	ObjectCache* cache = (ObjectCache*)parse_expression(argv[1]);

	kprintf("name: %s\n", cache->name);
	kprintf("lock: %p\n", &cache->lock);
	kprintf("object_size: %lu\n", cache->object_size);
	kprintf("alignment: %" B_PRIuSIZE "\n", cache->alignment);
	kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle);
	kprintf("total_objects: %lu\n", cache->total_objects);
	kprintf("used_count: %lu\n", cache->used_count);
	kprintf("empty_count: %lu\n", cache->empty_count);
	kprintf("pressure: %lu\n", cache->pressure);
	kprintf("slab_size: %lu\n", cache->slab_size);
	kprintf("usage: %lu\n", cache->usage);
	kprintf("maximum: %lu\n", cache->maximum);
	kprintf("flags: 0x%" B_PRIx32 "\n", cache->flags);
	kprintf("cookie: %p\n", cache->cookie);
	kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
	kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait);

	kprintf(" %-*s %-*s size used offset free\n",
		B_PRINTF_POINTER_WIDTH, "slab", B_PRINTF_POINTER_WIDTH, "chunk");

	SlabList::Iterator iterator = cache->empty.GetIterator();
	if (iterator.HasNext())
		kprintf("empty:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->partial.GetIterator();
	if (iterator.HasNext())
		kprintf("partial:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	iterator = cache->full.GetIterator();
	if (iterator.HasNext())
		kprintf("full:\n");
	while (::slab* slab = iterator.Next())
		dump_slab(slab);

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		kprintf("depot:\n");
		dump_object_depot(&cache->depot);
	}

	return 0;
}


// #pragma mark - AllocationTrackingCallback


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

AllocationTrackingCallback::~AllocationTrackingCallback()
{
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE


// #pragma mark -


#if SLAB_ALLOCATION_TRACKING_AVAILABLE

namespace {

class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
	AllocationCollectorCallback(bool resetInfos)
		:
		fResetInfos(resetInfos)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();

		if (traceEntry != NULL && info->IsTraceEntryValid()) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		caller_info* callerInfo = get_caller_info(caller);
		if (callerInfo == NULL) {
			kprintf("out of space for caller infos\n");
			return false;
		}

		callerInfo->count++;
		callerInfo->size += allocationSize;

		if (fResetInfos)
			info->Clear();

		return true;
	}

private:
	bool	fResetInfos;
};


class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationInfoPrinterCallback(bool printStackTrace, addr_t addressFilter,
		team_id teamFilter, thread_id threadFilter)
		:
		fPrintStackTrace(printStackTrace),
		fAddressFilter(addressFilter),
		fTeamFilter(teamFilter),
		fThreadFilter(threadFilter)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		if (fAddressFilter != 0 && (addr_t)allocation != fAddressFilter)
			return true;

		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
				return true;
			if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
				return true;
		} else {
			// we need the info if we have filters set
			if (fTeamFilter != -1 || fThreadFilter != -1)
				return true;
		}

		kprintf("allocation %p, size: %" B_PRIuSIZE, allocation,
			allocationSize);

		if (traceEntry != NULL) {
			kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
				", time %" B_PRId64 "\n", traceEntry->TeamID(),
				traceEntry->ThreadID(), traceEntry->Time());

			if (fPrintStackTrace)
				tracing_print_stack_trace(traceEntry->StackTrace());
		} else
			kprintf("\n");

		return true;
	}

private:
	bool		fPrintStackTrace;
	addr_t		fAddressFilter;
	team_id		fTeamFilter;
	thread_id	fThreadFilter;
};


class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
	AllocationDetailPrinterCallback(addr_t caller)
		:
		fCaller(caller)
	{
	}

	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
		void* allocation, size_t allocationSize)
	{
		if (!info->IsInitialized())
			return true;

		addr_t caller = 0;
		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
		if (traceEntry != NULL && !info->IsTraceEntryValid())
			traceEntry = NULL;

		if (traceEntry != NULL) {
			caller = tracing_find_caller_in_stack_trace(
				traceEntry->StackTrace(), kSlabCodeAddressRanges,
				kSlabCodeAddressRangeCount);
		}

		if (caller != fCaller)
			return true;

		kprintf("allocation %p, size: %" B_PRIuSIZE "\n", allocation,
			allocationSize);
		if (traceEntry != NULL)
			tracing_print_stack_trace(traceEntry->StackTrace());

		return true;
	}

private:
	addr_t	fCaller;
};

}	// unnamed namespace


static caller_info*
get_caller_info(addr_t caller)
{
	// find the caller info
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		if (caller == sCallerInfoTable[i].caller)
			return &sCallerInfoTable[i];
	}

	// not found, add a new entry, if there are free slots
	if (sCallerInfoCount >= kCallerInfoTableSize)
		return NULL;

	caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
	info->caller = caller;
	info->count = 0;
	info->size = 0;

	return info;
}


static int
caller_info_compare_size(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->size - a->size);
}


static int
caller_info_compare_count(const void* _a, const void* _b)
{
	const caller_info* a = (const caller_info*)_a;
	const caller_info* b = (const caller_info*)_b;
	return (int)(b->count - a->count);
}


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

static bool
analyze_allocation_callers(ObjectCache* cache, slab* slab,
	AllocationTrackingCallback& callback)
{
	for (uint32 i = 0; i < slab->size; i++) {
		if (!callback.ProcessTrackingInfo(&slab->tracking[i],
				cache->ObjectAtIndex(slab, i), cache->object_size)) {
			return false;
		}
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
	AllocationTrackingCallback& callback)
{
	for (SlabList::ConstIterator it = slabList.GetIterator();
			slab* slab = it.Next();) {
		if (!analyze_allocation_callers(cache, slab, callback))
			return false;
	}

	return true;
}


static bool
analyze_allocation_callers(ObjectCache* cache,
	AllocationTrackingCallback& callback)
{
	return analyze_allocation_callers(cache, cache->full, callback)
		&& analyze_allocation_callers(cache, cache->partial, callback);
}

#endif	// SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
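
/*!	KDL command "allocation_infos": prints the individual allocations
	currently tracked, optionally restricted to one object cache, one slab,
	one allocation address, and/or one team or thread, with optional stack
	traces.
*/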
static int
dump_allocation_infos(int argc, char **argv)
{
	ObjectCache* cache = NULL;
	slab* slab = NULL;
	addr_t addressFilter = 0;
	team_id teamFilter = -1;
	thread_id threadFilter = -1;
	bool printStackTraces = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "--stacktrace") == 0)
			printStackTraces = true;
		else if (strcmp(argv[i], "-a") == 0) {
			uint64 address;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &address, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			addressFilter = address;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcasecmp(argv[i], "-s") == 0) {
			uint64 slabAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &slabAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			void* slabPages = (void*)slabAddress;
			if (strcmp(argv[i - 1], "-s") == 0) {
				// lowercase "-s": the argument is a slab structure pointer;
				// uppercase "-S" is any address within the slab's pages
				slab = (struct slab*)(addr_t)slabAddress;
				slabPages = slab->pages;
			}

			cache = MemoryManager::DebugObjectCacheForAddress(slabPages);
			if (cache == NULL) {
				kprintf("Couldn't find object cache for address %p.\n",
					slabPages);
				return 0;
			}

			if (slab == NULL) {
				slab = cache->ObjectSlab(slabPages);

				if (slab == NULL) {
					kprintf("Couldn't find slab for address %p.\n", slabPages);
					return 0;
				}
			}
		} else if (strcmp(argv[i], "--team") == 0) {
			uint64 team;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &team, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			teamFilter = team;
		} else if (strcmp(argv[i], "--thread") == 0) {
			uint64 thread;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &thread, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			threadFilter = thread;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	AllocationInfoPrinterCallback callback(printStackTraces, addressFilter,
		teamFilter, threadFilter);

	if (slab != NULL || cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (slab != NULL) {
			if (!analyze_allocation_callers(cache, slab, callback))
				return 0;
		} else if (cache != NULL) {
			if (!analyze_allocation_callers(cache, callback))
				return 0;
		}
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	return 0;
}
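
/*!	KDL command "allocations_per_caller": sums up the tracked allocations per
	calling address, sorted by total size or, with "-c", by count. "-d"
	prints the individual allocations of a single caller instead, and "-r"
	resets the tracking info after collecting it.
*/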
static int
dump_allocations_per_caller(int argc, char **argv)
{
	bool sortBySize = true;
	bool resetAllocationInfos = false;
	bool printDetails = false;
	ObjectCache* cache = NULL;
	addr_t caller = 0;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-c") == 0) {
			sortBySize = false;
		} else if (strcmp(argv[i], "-d") == 0) {
			uint64 callerAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			caller = callerAddress;
			printDetails = true;
		} else if (strcmp(argv[i], "-o") == 0) {
			uint64 cacheAddress;
			if (++i >= argc
				|| !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
				print_debugger_command_usage(argv[0]);
				return 0;
			}

			cache = (ObjectCache*)(addr_t)cacheAddress;
		} else if (strcmp(argv[i], "-r") == 0) {
			resetAllocationInfos = true;
		} else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	sCallerInfoCount = 0;

	AllocationCollectorCallback collectorCallback(resetAllocationInfos);
	AllocationDetailPrinterCallback detailsCallback(caller);
	AllocationTrackingCallback& callback = printDetails
		? (AllocationTrackingCallback&)detailsCallback
		: (AllocationTrackingCallback&)collectorCallback;

	if (cache != NULL) {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		if (!analyze_allocation_callers(cache, callback))
			return 0;
#else
		kprintf("Object cache allocation tracking not available. "
			"SLAB_OBJECT_CACHE_TRACING (%d) and "
			"SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
			SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
		return 0;
#endif
	} else {
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
				it.HasNext();) {
			if (!analyze_allocation_callers(it.Next(), callback))
				return 0;
		}
#endif

#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
		if (!MemoryManager::AnalyzeAllocationCallers(callback))
			return 0;
#endif
	}

	if (printDetails)
		return 0;

	// sort the array
	qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
		sortBySize ? &caller_info_compare_size : &caller_info_compare_count);

	kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
		sortBySize ? "size" : "count");

	size_t totalAllocationSize = 0;
	size_t totalAllocationCount = 0;

	kprintf(" count size caller\n");
	kprintf("----------------------------------\n");
	for (int32 i = 0; i < sCallerInfoCount; i++) {
		caller_info& info = sCallerInfoTable[i];
		kprintf("%10" B_PRIuSIZE " %10" B_PRIuSIZE " %p", info.count,
			info.size, (void*)info.caller);

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf(" %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
				info.caller - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("\n");

		totalAllocationCount += info.count;
		totalAllocationSize += info.size;
	}

	kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		totalAllocationCount, totalAllocationSize);

	return 0;
}

#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE

void
add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
{
#if SLAB_OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	MutexLocker _(cache->lock);
	cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
#else
	T(Alloc(cache, flags, object));
#endif
#endif
}


// #pragma mark -


void
request_memory_manager_maintenance()
{
	MutexLocker locker(sMaintenanceLock);
	sMaintenanceCondition.NotifyAll();
}


// #pragma mark -
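
/*!	Final cache destruction: destroys the depot, panics if used objects are
	still around (full or partial slabs), returns the remaining empty slabs
	and deletes the cache object. Must only be called once no one references
	the cache anymore.
*/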
static void
delete_object_cache_internal(object_cache* cache)
{
	if (!(cache->flags & CACHE_NO_DEPOT))
		object_depot_destroy(&cache->depot, 0);

	mutex_lock(&cache->lock);

	if (!cache->full.IsEmpty())
		panic("cache destroy: still has full slabs");

	if (!cache->partial.IsEmpty())
		panic("cache destroy: still has partial slabs");

	while (!cache->empty.IsEmpty())
		cache->ReturnSlab(cache->empty.RemoveHead(), 0);

	mutex_destroy(&cache->lock);
	cache->Delete();
}


static void
increase_object_reserve(ObjectCache* cache)
{
	MutexLocker locker(sMaintenanceLock);

	cache->maintenance_resize = true;

	if (!cache->maintenance_pending) {
		cache->maintenance_pending = true;
		sMaintenanceQueue.Add(cache);
		sMaintenanceCondition.NotifyAll();
	}
}


/*!	Makes sure that \a objectCount objects can be allocated.
*/
static status_t
object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
	uint32 flags)
{
	// If someone else is already adding slabs, we wait for that to be finished
	// first.
	thread_id thread = find_thread(NULL);
	while (true) {
		if (objectCount <= cache->total_objects - cache->used_count)
			return B_OK;

		ObjectCacheResizeEntry* resizeEntry = NULL;
		if (cache->resize_entry_dont_wait != NULL) {
			resizeEntry = cache->resize_entry_dont_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;
			// Note: We could still have reentered the function, i.e.
			// resize_entry_can_wait would be ours. That doesn't matter much,
			// though, since after the don't-wait thread has done its job
			// everyone will be happy.
		} else if (cache->resize_entry_can_wait != NULL) {
			resizeEntry = cache->resize_entry_can_wait;
			if (thread == resizeEntry->thread)
				return B_WOULD_BLOCK;

			if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
				break;
		} else
			break;

		ConditionVariableEntry entry;
		resizeEntry->condition.Add(&entry);

		cache->Unlock();
		entry.Wait();
		cache->Lock();
	}

	// prepare the resize entry others can wait on
	ObjectCacheResizeEntry*& resizeEntry
		= (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
			? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;

	ObjectCacheResizeEntry myResizeEntry;
	resizeEntry = &myResizeEntry;
	resizeEntry->condition.Init(cache, "wait for slabs");
	resizeEntry->thread = thread;

	// add new slabs until there are as many free ones as requested
	while (objectCount > cache->total_objects - cache->used_count) {
		slab* newSlab = cache->CreateSlab(flags);
		if (newSlab == NULL) {
			resizeEntry->condition.NotifyAll();
			resizeEntry = NULL;
			return B_NO_MEMORY;
		}

		cache->usage += cache->slab_size;
		cache->total_objects += newSlab->size;

		cache->empty.Add(newSlab);
		cache->empty_count++;
	}

	resizeEntry->condition.NotifyAll();
	resizeEntry = NULL;

	return B_OK;
}
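
/*!	Low resource handler (registered in slab_init_post_sem()): walks the
	cache list once, gives each cache's reclaimer a chance to return objects,
	empties the depots and frees empty slabs down to a minimum that depends
	on the resource level and the cache's pressure, while respecting the
	cache's minimum object reserve.
*/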
static void
object_cache_low_memory(void* dummy, uint32 resources, int32 level)
{
	if (level == B_NO_LOW_RESOURCE)
		return;

	MutexLocker cacheListLocker(sObjectCacheListLock);

	// Append the first cache to the end of the queue. We assume that it is
	// one of the caches that will never be deleted and thus we use it as a
	// marker.
	ObjectCache* firstCache = sObjectCaches.RemoveHead();
	sObjectCaches.Add(firstCache);
	cacheListLocker.Unlock();

	ObjectCache* cache;
	do {
		cacheListLocker.Lock();

		cache = sObjectCaches.RemoveHead();
		sObjectCaches.Add(cache);

		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_pending || cache->maintenance_in_progress) {
			// We don't want to mess with caches in maintenance.
			continue;
		}

		cache->maintenance_pending = true;
		cache->maintenance_in_progress = true;

		maintenanceLocker.Unlock();
		cacheListLocker.Unlock();

		// We are calling the reclaimer without the object cache lock
		// to give the owner a chance to return objects to the slabs.

		if (cache->reclaimer)
			cache->reclaimer(cache->cookie, level);

		if ((cache->flags & CACHE_NO_DEPOT) == 0)
			object_depot_make_empty(&cache->depot, 0);

		MutexLocker cacheLocker(cache->lock);
		size_t minimumAllowed;

		switch (level) {
			case B_LOW_RESOURCE_NOTE:
				minimumAllowed = cache->pressure / 2 + 1;
				cache->pressure -= cache->pressure / 8;
				break;

			case B_LOW_RESOURCE_WARNING:
				cache->pressure /= 2;
				minimumAllowed = 0;
				break;

			default:
				cache->pressure = 0;
				minimumAllowed = 0;
				break;
		}

		while (cache->empty_count > minimumAllowed) {
			// make sure we respect the cache's minimum object reserve
			size_t objectsPerSlab = cache->empty.Head()->size;
			size_t freeObjects = cache->total_objects - cache->used_count;
			if (freeObjects < cache->min_object_reserve + objectsPerSlab)
				break;

			cache->ReturnSlab(cache->empty.RemoveHead(), 0);
			cache->empty_count--;
		}

		cacheLocker.Unlock();

		// Check whether in the meantime someone has really requested
		// maintenance for the cache.
		maintenanceLocker.Lock();

		if (cache->maintenance_delete) {
			delete_object_cache_internal(cache);
			continue;
		}

		cache->maintenance_in_progress = false;

		if (cache->maintenance_resize)
			sMaintenanceQueue.Add(cache);
		else
			cache->maintenance_pending = false;
	} while (cache != firstCache);
}
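
/*!	Main function of the "object cache resizer" kernel thread: waits on
	sMaintenanceCondition, performs MemoryManager maintenance while idle, and
	processes the caches queued in sMaintenanceQueue, growing them to their
	minimum object reserve or deleting them if requested.
*/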
static status_t
object_cache_maintainer(void*)
{
	while (true) {
		MutexLocker locker(sMaintenanceLock);

		// wait for the next request
		while (sMaintenanceQueue.IsEmpty()) {
			// perform memory manager maintenance, if needed
			if (MemoryManager::MaintenanceNeeded()) {
				locker.Unlock();
				MemoryManager::PerformMaintenance();
				locker.Lock();
				continue;
			}

			ConditionVariableEntry entry;
			sMaintenanceCondition.Add(&entry);
			locker.Unlock();
			entry.Wait();
			locker.Lock();
		}

		ObjectCache* cache = sMaintenanceQueue.RemoveHead();

		while (true) {
			bool resizeRequested = cache->maintenance_resize;
			bool deleteRequested = cache->maintenance_delete;

			if (!resizeRequested && !deleteRequested) {
				cache->maintenance_pending = false;
				cache->maintenance_in_progress = false;
				break;
			}

			cache->maintenance_resize = false;
			cache->maintenance_in_progress = true;

			locker.Unlock();

			if (deleteRequested) {
				delete_object_cache_internal(cache);
				break;
			}

			// resize the cache, if necessary

			MutexLocker cacheLocker(cache->lock);

			if (resizeRequested) {
				status_t error = object_cache_reserve_internal(cache,
					cache->min_object_reserve, 0);
				if (error != B_OK) {
					dprintf("object cache resizer: Failed to resize object "
						"cache %p!\n", cache);
					break;
				}
			}

			locker.Lock();
		}
	}

	// never can get here
	return B_OK;
}


// #pragma mark - public API
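
// Typical use of the object cache API (illustrative sketch only; "my_object"
// is a placeholder type and error checking is omitted):
//
//	object_cache* cache = create_object_cache("my objects",
//		sizeof(my_object), 0, NULL, NULL, NULL);
//	void* object = object_cache_alloc(cache, 0);
//	// ... use the object ...
//	object_cache_free(cache, object, 0);
//	delete_object_cache(cache);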

object_cache*
create_object_cache(const char* name, size_t object_size, size_t alignment,
	void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor)
{
	return create_object_cache_etc(name, object_size, alignment, 0, 0, 0, 0,
		cookie, constructor, destructor, NULL);
}


object_cache*
create_object_cache_etc(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	ObjectCache* cache;

	if (objectSize == 0) {
		cache = NULL;
	} else if (objectSize <= 256) {
		cache = SmallObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	} else {
		cache = HashedObjectCache::Create(name, objectSize, alignment, maximum,
			magazineCapacity, maxMagazineCount, flags, cookie, constructor,
			destructor, reclaimer);
	}

	if (cache != NULL) {
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Add(cache);
	}

	T(Create(name, objectSize, alignment, maximum, flags, cookie, cache));
	return cache;
}


void
delete_object_cache(object_cache* cache)
{
	T(Delete(cache));

	{
		MutexLocker _(sObjectCacheListLock);
		sObjectCaches.Remove(cache);
	}

	MutexLocker cacheLocker(cache->lock);

	{
		MutexLocker maintenanceLocker(sMaintenanceLock);
		if (cache->maintenance_in_progress) {
			// The maintainer thread is working with the cache. Just mark it
			// to be deleted.
			cache->maintenance_delete = true;
			return;
		}

		// unschedule maintenance
		if (cache->maintenance_pending)
			sMaintenanceQueue.Remove(cache);
	}

	// at this point no-one should have a reference to the cache anymore
	cacheLocker.Unlock();

	delete_object_cache_internal(cache);
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	MutexLocker _(cache->lock);

	if (cache->min_object_reserve == objectCount)
		return B_OK;

	cache->min_object_reserve = objectCount;

	increase_object_reserve(cache);

	return B_OK;
}
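
/*!	Allocates one object from the cache: the depot is tried first (unless
	CACHE_NO_DEPOT is set); otherwise an object is taken from a partial or
	empty slab, creating a new slab via object_cache_reserve_internal() if
	none is available.
*/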
void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	if (!(cache->flags & CACHE_NO_DEPOT)) {
		void* object = object_depot_obtain(&cache->depot);
		if (object) {
			add_alloc_tracing_entry(cache, flags, object);
			return fill_allocated_block(object, cache->object_size);
		}
	}

	MutexLocker locker(cache->lock);
	slab* source = NULL;

	while (true) {
		source = cache->partial.Head();
		if (source != NULL)
			break;

		source = cache->empty.RemoveHead();
		if (source != NULL) {
			cache->empty_count--;
			cache->partial.Add(source);
			break;
		}

		if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
			T(Alloc(cache, flags, NULL));
			return NULL;
		}

		cache->pressure++;
	}

	ParanoiaChecker _2(source);

	object_link* link = _pop(source->free);
	source->count--;
	cache->used_count++;

	if (cache->total_objects - cache->used_count < cache->min_object_reserve)
		increase_object_reserve(cache);

	REMOVE_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next,
		sizeof(void*));

	TRACE_CACHE(cache, "allocate %p (%p) from %p, %lu remaining.",
		link_to_object(link, cache->object_size), link, source, source->count);

	if (source->count == 0) {
		cache->partial.Remove(source);
		cache->full.Add(source);
	}

	void* object = link_to_object(link, cache->object_size);
	locker.Unlock();

	add_alloc_tracing_entry(cache, flags, object);
	return fill_allocated_block(object, cache->object_size);
}
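
/*!	Returns an object to the cache: with PARANOID_KERNEL_FREE the object is
	checked for a double free first, then it is either stored in the depot
	or, for CACHE_NO_DEPOT caches, linked back into its slab.
*/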
void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	if (object == NULL)
		return;

	T(Free(cache, object));

#if PARANOID_KERNEL_FREE
	// TODO: allow forcing the check even if we don't find deadbeef
	if (*(uint32*)object == 0xdeadbeef) {
		if (!cache->AssertObjectNotFreed(object))
			return;

		if ((cache->flags & CACHE_NO_DEPOT) == 0) {
			if (object_depot_contains_object(&cache->depot, object)) {
				panic("object_cache: object %p is already freed", object);
				return;
			}
		}
	}

	fill_freed_block(object, cache->object_size);
#endif

#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
	mutex_lock(&cache->lock);
	cache->TrackingInfoFor(object)->Clear();
	mutex_unlock(&cache->lock);
#endif

	if ((cache->flags & CACHE_NO_DEPOT) == 0) {
		object_depot_store(&cache->depot, object, flags);
		return;
	}

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	if (objectCount == 0)
		return B_OK;

	T(Reserve(cache, objectCount, flags));

	MutexLocker _(cache->lock);
	return object_cache_reserve_internal(cache, objectCount, flags);
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	MutexLocker _(cache->lock);
	*_allocatedMemory = cache->usage;
}


void
slab_init(kernel_args* args)
{
	MemoryManager::Init(args);

	new (&sObjectCaches) ObjectCacheList();

	block_allocator_init_boot();
}


void
slab_init_post_area()
{
	MemoryManager::InitPostArea();

	add_debugger_command("slabs", dump_slabs, "list all object caches");
	add_debugger_command("slab_cache", dump_cache_info,
		"dump information about a specific object cache");
	add_debugger_command("slab_depot", dump_object_depot,
		"dump contents of an object depot");
	add_debugger_command("slab_magazine", dump_depot_magazine,
		"dump contents of a depot magazine");
#if SLAB_ALLOCATION_TRACKING_AVAILABLE
	add_debugger_command_etc("allocations_per_caller",
		&dump_allocations_per_caller,
		"Dump current slab allocations summed up per caller",
		"[ -c ] [ -d <caller> ] [ -o <object cache> ] [ -r ]\n"
		"The current allocations will be summed up by caller (their count\n"
		"and size) printed in decreasing order by size or, if \"-c\" is\n"
		"specified, by allocation count. If given, <object cache> specifies\n"
		"the address of the object cache for which to print the allocations.\n"
		"If \"-d\" is given, each allocation for caller <caller> is printed\n"
		"including the respective stack trace.\n"
		"If \"-r\" is given, the allocation infos are reset after gathering\n"
		"the information, so the next command invocation will only show the\n"
		"allocations made after the reset.\n", 0);
	add_debugger_command_etc("allocation_infos",
		&dump_allocation_infos,
		"Dump current slab allocations",
		"[ --stacktrace ] [ -o <object cache> | -s <slab> | -S <address> ] "
		"[ -a <allocation> ] [ --team <team ID> ] [ --thread <thread ID> ]\n"
		"The current allocations filtered by optional values will be printed.\n"
		"If given, <object cache> specifies the address of the object cache\n"
		"or <slab> specifies the address of a slab, for which to print the\n"
		"allocations. Alternatively <address> specifies any address within\n"
		"a slab allocation range.\n"
		"The optional \"-a\" address filters for a specific allocation.\n"
		"With \"--team\" and \"--thread\", allocations can be filtered by\n"
		"specific teams and/or threads (these only work if a corresponding\n"
		"tracing entry is still available).\n"
		"If \"--stacktrace\" is given, then stack traces of the allocation\n"
		"callers are printed, where available.\n", 0);
#endif	// SLAB_ALLOCATION_TRACKING_AVAILABLE
}


void
slab_init_post_sem()
{
	register_low_resource_handler(object_cache_low_memory, NULL,
		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
			| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);

	block_allocator_init_rest();
}


void
slab_init_post_thread()
{
	new(&sMaintenanceQueue) MaintenanceQueue;
	sMaintenanceCondition.Init(&sMaintenanceQueue, "object cache maintainer");

	thread_id objectCacheResizer = spawn_kernel_thread(object_cache_maintainer,
		"object cache resizer", B_URGENT_PRIORITY, NULL);
	if (objectCacheResizer < 0) {
		panic("slab_init_post_thread(): failed to spawn object cache resizer "
			"thread\n");
		return;
	}

	resume_thread(objectCacheResizer);
}


RANGE_MARKER_FUNCTION_END(Slab)


#endif	// !USE_GUARDED_HEAP_FOR_OBJECT_CACHE