//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "list.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {

template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
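
  // Binds this local cache to its backing SizeClassAllocator and, when a
  // GlobalStats instance is provided, links the local stats into it.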
  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initCache();
  }
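
  // Drains all remaining cached blocks back to the allocator and unlinks the
  // local stats from the global stats.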
  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }
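
  // Returns a block of the given class, refilling the cache from the
  // allocator when it runs empty. Returns nullptr if the refill fails.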
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill with half of the maximum number of cached blocks.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }
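
  // Returns a block to the cache, draining half of the cached blocks back to
  // the allocator first if the cache is full. The return value tells the
  // caller whether a drain took place.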
  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }
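
  // Releases every cached block in every class back to the allocator.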
  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal
    // blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }
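
  // Allocates a block from the batch class, used internally for batch
  // bookkeeping; failing to get one is fatal, hence the out-of-memory report.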
  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  LocalStats &getStats() { return Stats; }
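
  // Appends a human-readable, per-class summary of the cached blocks to Str.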
  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // Note that the string utils don't support printing u16, thus we cast it
      // to the commonly used type uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }
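
  // Upper bound on the number of blocks the cache will hold for a given size,
  // per the size class map's hints. MaxCount is twice this value.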
  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;
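
  // Computes each class's MaxCount and records its size for stats purposes.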
  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }
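
  // Gives a batch block back to the cache; a no-op when operating on the
  // batch class itself.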
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }
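
  // Pulls up to MaxRefill blocks from the allocator into C->Chunks. Returns
  // false if no block could be obtained.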
  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }
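
  // Pushes the oldest half of the cached blocks back to the allocator and
  // shifts the survivors to the front of the Chunks array.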
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 will be promoted to int by arithmetic type conversion.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};
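
// A minimal usage sketch, kept as a comment so the header is unchanged in
// behavior. `Primary` is a stand-in for a concrete SizeClassAllocator
// instantiation; the ClassId would normally come from
// SizeClassMap::getClassIdBySize().
//
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &PrimaryInstance); // GStats may be nullptr.
//   void *P = Cache.allocate(ClassId);     // nullptr if the refill failed.
//   ...
//   if (Cache.deallocate(ClassId, P)) {
//     // The cache was full and drained half of its blocks.
//   }
//   Cache.destroy(&GStats);                // Drains and unlinks the stats.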

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_