third_party/tcmalloc/vendor/src/central_freelist.cc

// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#include "config.h"
#include <algorithm>
#include "central_freelist.h"
#include "internal_logging.h"  // for ASSERT, MESSAGE
#include "linked_list.h"       // for SLL_Next, SLL_Push, etc
#include "page_heap.h"         // for PageHeap
#include "static_vars.h"       // for Static

using std::min;
using std::max;

namespace tcmalloc {

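// Set up an empty central list for size class |cl| and choose the initial
// and maximum transfer cache sizes for that class.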
void CentralFreeList::Init(size_t cl) {
  size_class_ = cl;
  tcmalloc::DLL_Init(&empty_);
  tcmalloc::DLL_Init(&nonempty_);
  num_spans_ = 0;
  counter_ = 0;

  max_cache_size_ = kMaxNumTransferEntries;
#ifdef TCMALLOC_SMALL_BUT_SLOW
  // Disable the transfer cache for the small footprint case.
  cache_size_ = 0;
#else
  cache_size_ = 16;
#endif
  if (cl > 0) {
    // Limit the maximum size of the cache based on the size class.  If this
    // is not done, large size class objects will consume a lot of memory if
    // they just sit in the transfer cache.
    int32_t bytes = Static::sizemap()->ByteSizeForClass(cl);
    int32_t objs_to_move = Static::sizemap()->num_objects_to_move(cl);

    ASSERT(objs_to_move > 0 && bytes > 0);
    // Limit each size class cache to at most 1MB of objects or one entry,
    // whichever is greater.  Total transfer cache memory used across all
    // size classes then can't be greater than approximately
    // 1MB * kMaxNumTransferEntries.
    // min and max are in parens to avoid macro-expansion on windows.
    max_cache_size_ = (min)(max_cache_size_,
                            (max)(1, (1024 * 1024) / (bytes * objs_to_move)));
    cache_size_ = (min)(cache_size_, max_cache_size_);
  }
  used_slots_ = 0;
  ASSERT(cache_size_ <= max_cache_size_);
}

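// Return every object on the singly linked list starting at |start| to the
// span that owns it.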
void CentralFreeList::ReleaseListToSpans(void* start) {
  while (start) {
    void *next = SLL_Next(start);
    ReleaseToSpans(start);
    start = next;
  }
}

// MapObjectToSpan should logically be part of ReleaseToSpans.  But
// this triggers an optimization bug in gcc 4.5.0.  Moving to a
// separate function, and making sure that function isn't inlined,
// seems to fix the problem.  It also should be fixed for gcc 4.5.1.
static
#if __GNUC__ == 4 && __GNUC_MINOR__ == 5 && __GNUC_PATCHLEVEL__ == 0
__attribute__ ((noinline))
#endif
Span* MapObjectToSpan(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = Static::pageheap()->GetDescriptor(p);
  return span;
}

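// Return one object to its owning span.  If that span's refcount drops to
// zero, the whole span is handed back to the page heap (temporarily dropping
// lock_ while doing so).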
void CentralFreeList::ReleaseToSpans(void* object) {
  Span* span = MapObjectToSpan(object);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // If span is empty, move it to non-empty list
  if (span->objects == NULL) {
    tcmalloc::DLL_Remove(span);
    tcmalloc::DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // The following check is expensive, so it is disabled by default
  if (false) {
    // Check that object does not occur in list
    int got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift) /
           Static::sizemap()->ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    Event(span, '#', 0);
    counter_ -= ((span->length<<kPageShift) /
                 Static::sizemap()->ByteSizeForClass(span->sizeclass));
    tcmalloc::DLL_Remove(span);
    --num_spans_;

    // Release central list lock while operating on pageheap
    lock_.Unlock();
    {
      SpinLockHolder h(Static::pageheap_lock());
      Static::pageheap()->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}

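// Pick a victim size class by cycling a (deliberately unsynchronized) static
// counter and ask it to shrink its transfer cache by one slot.  Returns true
// if a slot was reclaimed.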
bool CentralFreeList::EvictRandomSizeClass(
    int locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;  // Updated without a lock, but who cares.
  if (t >= kNumClasses) {
    while (t >= kNumClasses) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < kNumClasses);
  if (t == locked_size_class) return false;
  return Static::central_cache()[t].ShrinkCache(locked_size_class, force);
}

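// Make sure there is room for one more transfer cache slot, growing
// cache_size_ (possibly by shrinking some other size class's cache) if
// needed.  Returns false if no slot can be made available.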
bool CentralFreeList::MakeCacheSpace() {
  // Is there room in the cache?
  if (used_slots_ < cache_size_) return true;
  // Check if we can expand this cache?
  if (cache_size_ == max_cache_size_) return false;
  // Ok, we'll try to grab an entry from some other size class.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Succeeded in evicting, we're going to make our cache larger.
    // However, we may have dropped and re-acquired the lock in
    // EvictRandomSizeClass (via ShrinkCache and the LockInverter), so the
    // cache_size may have changed.  Therefore, check and verify that it is
    // still OK to increase the cache_size.
    if (cache_size_ < max_cache_size_) {
      cache_size_++;
      return true;
    }
  }
  return false;
}

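// Swaps which lock this thread holds for the duration of a scope: the
// constructor releases |held| and acquires |temp|, and the destructor
// restores the original state.  This keeps us from ever holding two
// size-class locks at the same time.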
namespace {
class LockInverter {
 private:
  SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(SpinLock* held, SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock();  temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock();  held_->Lock();  }
};
}  // namespace

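// Shrink this size class's transfer cache by one slot so another class can
// grow.  |locked_size_class| identifies the cache whose lock the caller
// already holds; a full cache is shrunk only when |force| is true, in which
// case the objects in the evicted slot are returned to their spans.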
// This function is marked as NO_THREAD_SAFETY_ANALYSIS because it uses
// LockInverter to release one lock and acquire another in scoped-lock
// style, which our current annotation/analysis does not support.
bool CentralFreeList::ShrinkCache(int locked_size_class, bool force)
    NO_THREAD_SAFETY_ANALYSIS {
  // Start with a quick check without taking a lock.
  if (cache_size_ == 0) return false;
  // We don't evict from a full cache unless we are 'forcing'.
  if (force == false && used_slots_ == cache_size_) return false;

  // Grab lock, but first release the other lock held by this thread.  We use
  // the lock inverter to ensure that we never hold two size class locks
  // concurrently.  That can create a deadlock because there is no well
  // defined nesting order.
  LockInverter li(&Static::central_cache()[locked_size_class].lock_, &lock_);
  ASSERT(used_slots_ <= cache_size_);
  ASSERT(0 <= cache_size_);
  if (cache_size_ == 0) return false;
  if (used_slots_ == cache_size_) {
    if (force == false) return false;
    // ReleaseListToSpans releases the lock, so we have to make all the
    // updates to the central list before calling it.
    cache_size_--;
    used_slots_--;
    ReleaseListToSpans(tc_slots_[used_slots_].head);
    return true;
  }
  cache_size_--;
  return true;
}

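// Insert the range [start, end] of N objects into this central list.  A full
// batch (num_objects_to_move) is stashed in the transfer cache when a slot is
// available; otherwise the objects are returned to their spans.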
void CentralFreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >= 0);
    ASSERT(slot < max_cache_size_);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}

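// Remove up to N objects, returning them through *start and *end.  A full
// batch is satisfied from the transfer cache when possible; otherwise objects
// are fetched from spans.  Returns the number of objects actually removed.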
int CentralFreeList::RemoveRange(void **start, void **end, int N) {
  ASSERT(N > 0);
  lock_.Lock();
  if (N == Static::sizemap()->num_objects_to_move(size_class_) &&
      used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    lock_.Unlock();
    return N;
  }

  int result = 0;
  void* head = NULL;
  void* tail = NULL;
  // TODO: Prefetch multiple TCEntries?
  tail = FetchFromSpansSafe();
  if (tail != NULL) {
    SLL_SetNext(tail, NULL);
    head = tail;
    result = 1;
    while (result < N) {
      void *t = FetchFromSpans();
      if (!t) break;
      SLL_Push(&head, t);
      result++;
    }
  }
  lock_.Unlock();
  *start = head;
  *end = tail;
  return result;
}

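// Like FetchFromSpans(), but populates the free list from the page heap if no
// object is immediately available.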
void* CentralFreeList::FetchFromSpansSafe() {
  void *t = FetchFromSpans();
  if (!t) {
    Populate();
    t = FetchFromSpans();
  }
  return t;
}

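// Pop a single object off the first non-empty span, or return NULL if all
// spans are empty.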
void* CentralFreeList::FetchFromSpans() {
  if (tcmalloc::DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  span->refcount++;
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Move to empty list
    tcmalloc::DLL_Remove(span);
    tcmalloc::DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;
  return result;
}

// Fetch memory from the system and add to the central cache freelist.
void CentralFreeList::Populate() {
  // Release central list lock while operating on pageheap
  lock_.Unlock();
  const size_t npages = Static::sizemap()->class_to_pages(size_class_);

  Span* span;
  {
    SpinLockHolder h(Static::pageheap_lock());
    span = Static::pageheap()->New(npages);
    if (span) Static::pageheap()->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    Log(kLog, __FILE__, __LINE__,
        "tcmalloc: allocation failed", npages << kPageShift);
    lock_.Lock();
    return;
  }
  ASSERT(span->length == npages);
  // Cache sizeclass info eagerly.  Locking is not necessary.
  // (Instead of being eager, we could just replace any stale info
  // about this span, but that seems to be no better in practice.)
  for (int i = 0; i < npages; i++) {
    Static::pageheap()->CacheSizeClass(span->start + i, size_class_);
  }

  // Split the block into pieces and add to the free-list
  // TODO: coloring of objects to avoid cache conflicts?
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = Static::sizemap()->ByteSizeForClass(size_class_);
  int num = 0;
  while (ptr + size <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr += size;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0;  // No sub-object in use yet

  // Add span to list of non-empty spans
  lock_.Lock();
  tcmalloc::DLL_Prepend(&nonempty_, span);
  ++num_spans_;
  counter_ += num;
}

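// Number of objects currently held in the transfer cache.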
int CentralFreeList::tc_length() {
  SpinLockHolder h(&lock_);
  return used_slots_ * Static::sizemap()->num_objects_to_move(size_class_);
}

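// Internal fragmentation: for each span, the bytes left over after packing as
// many objects of this size class as fit.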
size_t CentralFreeList::OverheadBytes() {
  SpinLockHolder h(&lock_);
  if (size_class_ == 0) {  // 0 holds the 0-sized allocations
    return 0;
  }
  const size_t pages_per_span = Static::sizemap()->class_to_pages(size_class_);
  const size_t object_size = Static::sizemap()->class_to_size(size_class_);
  ASSERT(object_size > 0);
  const size_t overhead_per_span = (pages_per_span * kPageSize) % object_size;
  return num_spans_ * overhead_per_span;
}

}  // namespace tcmalloc