//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "platform.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"

namespace scudo {
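
// A per-thread cache of free blocks, one small array per size class.
// allocate() pops compact pointers from the local array and refills from the
// backing SizeClassAllocator when empty; deallocate() pushes blocks back,
// draining half of the array to the allocator whenever it fills up.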
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
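
  // Links the cache's local stats into the global stats and binds the cache
  // to its backing allocator. Expected to run on an empty cache.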
  void init(GlobalStats *S, SizeClassAllocator *A) {
    DCHECK(isEmpty());
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
    initCache();
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }
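
  // Pops a block for ClassId from the local array, refilling half of MaxCount
  // from the backing allocator when the array is empty. Returns null if even
  // the refill cannot produce a block.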
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      // Refill with half of the maximum number of cached blocks.
      DCHECK_GT(C->MaxCount / 2, 0U);
      if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That
    // keeps the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }
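
  // Caches P for later reuse. Returns true when the array was already at
  // MaxCount and half of it had to be drained back to the backing allocator
  // first, a signal the caller may act on to release unused resources.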
  bool deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];

    // If the cache is full, drain half of the blocks back to the main
    // allocator.
    const bool NeedToDrainCache = C->Count == C->MaxCount;
    if (NeedToDrainCache)
      drain(C, ClassId);
    // See the comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);

    return NeedToDrainCache;
  }

  bool isEmpty() const {
    for (uptr I = 0; I < NumClasses; ++I)
      if (PerClassArray[I].Count)
        return false;
    return true;
  }
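
  // Flushes every cached block of every class back to the backing allocator.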
  void drain() {
    // Drain BatchClassId last as it may be needed while draining normal
    // blocks.
    for (uptr I = 0; I < NumClasses; ++I) {
      if (I == BatchClassId)
        continue;
      while (PerClassArray[I].Count > 0)
        drain(&PerClassArray[I], I);
    }
    while (PerClassArray[BatchClassId].Count > 0)
      drain(&PerClassArray[BatchClassId], BatchClassId);
    DCHECK(isEmpty());
  }
  void *getBatchClassBlock() {
    void *B = allocate(BatchClassId);
    if (UNLIKELY(!B))
      reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
    return B;
  }

  LocalStats &getStats() { return Stats; }

  void getStats(ScopedString *Str) {
    bool EmptyCache = true;
    for (uptr I = 0; I < NumClasses; ++I) {
      if (PerClassArray[I].Count == 0)
        continue;

      EmptyCache = false;
      // The size of BatchClass is set to 0 intentionally. See the comment in
      // initCache() for more details.
      const uptr ClassSize = I == BatchClassId
                                 ? SizeClassAllocator::getSizeByClassId(I)
                                 : PerClassArray[I].ClassSize;
      // The string utils don't support printing u16, so we cast to uptr.
      Str->append("    %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
                  static_cast<uptr>(PerClassArray[I].Count),
                  static_cast<uptr>(PerClassArray[I].MaxCount));
    }

    if (EmptyCache)
      Str->append("    No block is cached.\n");
  }
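
  // The cache holds at most 2 * getMaxCached(Size) blocks per class, matching
  // the capacity of the Chunks array below.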
  static u16 getMaxCached(uptr Size) {
    return Min(SizeClassMap::MaxNumCachedHint,
               SizeClassMap::getMaxCachedHint(Size));
  }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr BatchClassId = SizeClassMap::BatchClassId;
  struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
    u16 Count;
    u16 MaxCount;
    // Note: ClassSize is zero for the transfer batch.
    uptr ClassSize;
    CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
  };
  PerClass PerClassArray[NumClasses] = {};
  LocalStats Stats;
  SizeClassAllocator *Allocator = nullptr;

  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
      if (I != BatchClassId) {
        P->ClassSize = Size;
      } else {
        // ClassSize in this struct is only used for malloc/free stats, which
        // should only track user allocations, not internal movements.
        P->ClassSize = 0;
      }
    }
  }
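
  // A batch holding BatchClassId blocks is itself one of the blocks it
  // tracks, so it must not be freed separately; every other batch is returned
  // to the batch class through the regular deallocation path.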
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != BatchClassId)
      deallocate(BatchClassId, B);
  }
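
  // Asks the backing allocator for up to MaxRefill blocks in a single call;
  // popBlocks may return fewer, and zero blocks is reported as failure.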
  NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
    const u16 NumBlocksRefilled =
        Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
    DCHECK_LE(NumBlocksRefilled, MaxRefill);
    C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
    return NumBlocksRefilled != 0;
  }
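
  // Pushes up to half of MaxCount blocks from the front of Chunks back to the
  // backing allocator, then shifts the remaining blocks down to the front.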
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
    Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
    // u16 is promoted to int by the arithmetic conversions, hence the cast
    // back down.
    C->Count = static_cast<u16>(C->Count - Count);
    for (u16 I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_