// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include "config.h"
#include <algorithm>
#include "central_freelist.h"
#include "free_list.h"         // for FL_Next, FL_Push, etc
#include "internal_logging.h"  // for ASSERT, MESSAGE
#include "page_heap.h"         // for PageHeap
#include "static_vars.h"       // for Static

using std::min;
using std::max;

namespace tcmalloc {
void CentralFreeList::Init(size_t cl) {
  size_class_ = cl;
  tcmalloc::DLL_Init(&empty_);
  tcmalloc::DLL_Init(&nonempty_);
  num_spans_ = 0;
  counter_ = 0;

  max_cache_size_ = kMaxNumTransferEntries;
#ifdef TCMALLOC_SMALL_BUT_SLOW
  // Disable the transfer cache for the small footprint case.
  cache_size_ = 0;
#else
  cache_size_ = 16;
#endif
  // Limit the maximum size of the cache based on the size class.  If this
  // is not done, large size class objects will consume a lot of memory if
  // they just sit in the transfer cache.
  int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
  int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);

  ASSERT(objs_to_move > 0 && bytes > 0);
  // Limit each size class cache to at most 1MB of objects or one entry,
  // whichever is greater.  Total transfer cache memory used across all
  // size classes then can't be greater than approximately
  // 1MB * kMaxNumTransferEntries.
  // min and max are in parens to avoid macro-expansion on Windows.
  max_cache_size_ = (min)(max_cache_size_,
                          (max)(1, (1024 * 1024) / (bytes * objs_to_move)));
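  // For example, a hypothetical size class with 256-byte objects moved in
  // batches of 32 would be capped at (1024 * 1024) / (256 * 32) = 128
  // entries; classes with larger batches get proportionally fewer slots.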
  cache_size_ = (min)(cache_size_, max_cache_size_);
  used_slots_ = 0;

  ASSERT(cache_size_ <= max_cache_size_);
}

void CentralFreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = FL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}

// MapObjectToSpan should logically be part of ReleaseToSpans.  But
// this triggers an optimization bug in gcc 4.5.0.  Moving it to a
// separate function, and making sure that function isn't inlined,
// seems to avoid the problem.  The bug is expected to be fixed in gcc 4.5.1.
static
#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0
__attribute__ ((noinline))
#endif
Span* MapObjectToSpan(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = Static::pageheap()->GetDescriptor(p);
  return span;
}

void CentralFreeList::ReleaseToSpans(void* object) {
  Span* span = MapObjectToSpan(object);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to the non-empty list
  if (span->objects == NULL) {
    tcmalloc::DLL_Remove(span);
    tcmalloc::DLL_Prepend(&nonempty_, span);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    int got = 0;
    for (void* p = span->objects; p != NULL; p = FL_Next(p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length << kPageShift) /
           Static::sizemap()->ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    counter_ -= ((span->length << kPageShift) /
                 Static::sizemap()->ByteSizeForClass(span->sizeclass));
    tcmalloc::DLL_Remove(span);
    --num_spans_;

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(Static::pageheap_lock());
      Static::pageheap()->Delete(span);
    }
    lock_.Lock();
  } else {
    FL_Push(&(span->objects), object);
  }
}
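
// Pick another size class (round-robin via a racy static counter) and ask it
// to give up a transfer cache slot by shrinking its cache.  Returns true if
// a slot was reclaimed; `force` allows evicting from a full cache.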
bool CentralFreeList::EvictRandomSizeClass(
    int locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= kNumClasses) {
    while (t >= kNumClasses) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < kNumClasses);
  if (t == locked_size_class) return false;
  return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
}

bool CentralFreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Can we expand the cache any further?
  if (cache_size_ == max_cache_size_) return false;
  // OK, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, so we're going to make our cache larger.
    // However, we may have dropped and re-acquired the lock in
    // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so
    // cache_size_ may have changed.  Therefore, check and verify that it is
    // still OK to increase cache_size_.
    if (cache_size_ < max_cache_size_) {
      cache_size_++;
      return true;
    }
  }
  return false;
}
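
// Scoped helper that swaps which of two spinlocks is held: it releases
// `held`, acquires `temp`, and restores the original state on destruction.
// ShrinkCache uses it so a thread never holds two size-class locks at once.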
namespace {
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}  // namespace

// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses
// LockInverter to release one lock and acquire another in scoped-lock
// style, which our current annotation/analysis does not support.
bool CentralFreeList::ShrinkCache(int locked_size_class, bool force)
    NO_THREAD_SAFETY_ANALYSIS {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab our lock, but first release the other lock held by this thread.  We
  // use the lock inverter to ensure that we never hold two size class locks
  // concurrently.  Holding both could deadlock because there is no well
  // defined nesting order.
  LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}
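
// Insert a batch of N objects (the linked list from `start` to `end`).  If
// the batch is exactly one size-class move quantum and a transfer cache slot
// is free (or can be made free), stash the whole list in one slot; otherwise
// hand each object back to its span.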
void CentralFreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < max_cache_size_);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
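
// Remove up to N objects, returning them via *start and *end.  A full cached
// batch is handed back whole when one is available; otherwise objects are
// fetched from spans one at a time.  Returns the number of objects removed.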
int CentralFreeList::RemoveRange(void **start, void **end, int N) {
  ASSERT(N > 0);
  lock_.Lock();
  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
      used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    lock_.Unlock();
    return N;
  }

  int result = 0;
  void* head = NULL;
  void* tail = NULL;
  // TODO: Prefetch multiple TCEntries?
  tail = FetchFromSpansSafe();
  if (tail != NULL) {
    FL_Push(&head, tail);
    result = 1;
    while (result < N) {
      void *t = FetchFromSpans();
      if (!t) break;
      FL_Push(&head, t);
      result++;
    }
  }
  lock_.Unlock();
  *start = head;
  *end = tail;
  return result;
}
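
// Like FetchFromSpans(), but if no objects are available it first calls
// Populate() to refill the free list from the page heap.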
void* CentralFreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}
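
// Pop one object from the first non-empty span, moving that span to the
// empty list if this drains it.  Returns NULL if no span has free objects.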
void* CentralFreeList::FetchFromSpans() {
  if (tcmalloc::DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  span->refcount++;
  void *result = FL_Pop(&(span->objects));
  if (span->objects == NULL) {
    // Move to empty list
    tcmalloc::DLL_Remove(span);
    tcmalloc::DLL_Prepend(&empty_, span);
  }
  counter_--;
  return result;
}

// Fetch memory from the system and add to the central cache freelist.
void CentralFreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = Static::sizemap()->class_to_pages(size_class_);

  Span* span;
  {
    SpinLockHolder h(Static::pageheap_lock());
    span = Static::pageheap()->New(npages);
    if (span) Static::pageheap()->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    Log(kLog, __FILE__, __LINE__,
        "tcmalloc: allocation failed", npages << kPageShift);
    lock_.Lock();
    return;
  }
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (int i = 0; i < npages; i++) {
    Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add them to the free list
  // TODO: coloring of objects to avoid cache conflicts?
  void* list = NULL;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
  int num = 0;
  while (ptr + size <= limit) {
    FL_Push(&list, ptr);
    ptr += size;
    num++;
  }
  ASSERT(ptr <= limit);
  span->objects = list;
  span->refcount = 0;  // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  tcmalloc::DLL_Prepend(&nonempty_, span);
  ++num_spans_;
  counter_ += num;
}

int CentralFreeList::tc_length() {
  SpinLockHolder h(&lock_);
  return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
}

size_t CentralFreeList::OverheadBytes() {
  SpinLockHolder h(&lock_);
  if (size_class_ == 0) {  // 0 holds the 0-sized allocations
    return 0;
  }
  const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
  const size_t object_size = Static::sizemap()->class_to_size(size_class_);
  ASSERT(object_size > 0);
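  // The overhead is the tail of each span that cannot hold a whole object.
  // For example, with a hypothetical 80-byte class and an assumed 8 KiB
  // tcmalloc page, a one-page span leaves 8192 % 80 = 32 unusable bytes.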
  const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
  return num_spans_ * overhead_per_span;
}

}  // namespace tcmalloc