// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#include <config.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>                       // for PRIuPTR
#endif
#include <gperftools/malloc_extension.h>    // for MallocRange, etc
#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "internal_logging.h"               // for ASSERT, TCMalloc_Printer, etc
#include "page_heap_allocator.h"            // for PageHeapAllocator
#include "static_vars.h"                    // for Static
#include "system-alloc.h"                   // for TCMalloc_SystemAlloc, etc

DEFINE_double(tcmalloc_release_rate,
              EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0.0,10.0]");
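
// Note: IncrementalScavenge() below translates this rate into a delay of
// roughly 1000/rate freed pages per released page.  For example, at the
// default rate of 1.0 the heap waits ~1000 pages after releasing one page;
// at 10.0 it waits only ~100, so memory is returned about ten times faster,
// and at 0.0 releasing is disabled entirely.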

namespace tcmalloc {

PageHeap::PageHeap()
    : pagemap_(MetaDataAlloc),
      pagemap_cache_(0),
      scavenge_counter_(0),
      // Start scavenging at kMaxPages list
      release_index_(kMaxPages) {
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (int i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}

Span* PageHeap::SearchFreeAndLargeLists(Length n) {
  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = &free_[s].normal;
    // If we're lucky, ll is non-empty, meaning it has a suitable span.
    if (!DLL_IsEmpty(ll)) {
      ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
      return Carve(ll->next, n);
    }
    // Alternatively, maybe there's a usable returned span.
    ll = &free_[s].returned;
    if (!DLL_IsEmpty(ll)) {
      ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
      return Carve(ll->next, n);
    }
  }
  // No luck in free lists, our last chance is in a larger class.
  return AllocLarge(n);  // May be NULL
}

Span* PageHeap::New(Length n) {
  Span* result = SearchFreeAndLargeLists(n);
  if (result != NULL) return result;

  // Grow the heap and try again.
  if (!GrowHeap(n)) {
    return NULL;
  }
  return SearchFreeAndLargeLists(n);
}
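
// AllocLarge() below implements address-ordered best-fit over the two large
// lists.  Illustrative example (hypothetical contents): if n = 300 and the
// large lists hold spans of 320 pages at page 0x9000, 310 pages at page
// 0x5000, and 310 pages at page 0x2000, the 310-page span at page 0x2000
// wins -- smallest sufficient length first, lowest start address as the
// tie-breaker -- and Carve() trims it down to exactly 300 pages.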

Span* PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
  Span* best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_NORMAL_FREELIST);
      }
    }
  }

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_RETURNED_FREELIST);
      }
    }
  }

  return best == NULL ? NULL : Carve(best, n);
}
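
// Split() below carves an in-use span in two without touching any free list.
// Worked example (hypothetical values): splitting a 10-page span starting at
// page P with n = 3 shrinks the original span to pages [P, P+3) and returns a
// new 7-page leftover span covering [P+3, P+10), with the pagemap updated so
// that page P+2 still maps to the original span.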

Span* PageHeap::Split(Span* span, Length n) {
  ASSERT(n < span->length);
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->sizeclass == 0);

  const int extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  ASSERT(leftover->location == Span::IN_USE);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span);  // Update map from pageid to span
  span->length = n;

  return leftover;
}

Span* PageHeap::Carve(Span* span, Length n) {
  ASSERT(span->location != Span::IN_USE);
  const int old_location = span->location;
  RemoveFromFreeList(span);
  span->location = Span::IN_USE;

  const int extra = span->length - n;
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->location = old_location;
    Event(leftover, 'S', extra);
    RecordSpan(leftover);
    PrependToFreeList(leftover);  // Skip coalescing - no candidates possible
    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
  return span;
}

void PageHeap::Delete(Span* span) {
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  const Length n = span->length;
  span->location = Span::ON_NORMAL_FREELIST;
  Event(span, 'D', span->length);
  MergeIntoFreeList(span);  // Coalesces if possible
  IncrementalScavenge(n);
}
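
// Coalescing example for MergeIntoFreeList() below (hypothetical pages): if a
// span covering pages [10,12) is freed while same-location spans cover [7,10)
// and [12,15), both neighbors are pulled off their free lists and the result
// is a single 8-page span covering [7,15); only the boundary pagemap entries
// (pages 7 and 14) are rewritten to point at the merged span.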

void PageHeap::MergeIntoFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary.  We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note that only similar spans are merged together.  For example,
  // we do not coalesce "returned" spans with "normal" spans.
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->location == span->location) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    RemoveFromFreeList(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->location == span->location) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    RemoveFromFreeList(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  PrependToFreeList(span);
}
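
// Free-list layout used by PrependToFreeList() and RemoveFromFreeList():
// free_[i] holds spans of exactly i pages (i < kMaxPages) and large_ holds
// everything bigger; each SpanList keeps "normal" (mapped) and "returned"
// (released to the OS) spans on separate doubly-linked lists, and the
// free_bytes / unmapped_bytes stats track the two kinds respectively.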

void PageHeap::PrependToFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->normal, span);
  } else {
    stats_.unmapped_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->returned, span);
  }
}

void PageHeap::RemoveFromFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes -= (span->length << kPageShift);
  } else {
    stats_.unmapped_bytes -= (span->length << kPageShift);
  }
  DLL_Remove(span);
}
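
// Worked example for the delay computed in IncrementalScavenge() below,
// assuming the default FLAGS_tcmalloc_release_rate of 1.0 and that the
// kMaxReleaseDelay cap is not hit: mult = 1000.0 / 1.0 = 1000, so releasing
// one page sets scavenge_counter_ to 1000, i.e. roughly another 1000 pages
// must be Delete()d before the next release; releasing a 4-page span sets
// the counter to 4000 instead.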

void PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  const double rate = FLAGS_tcmalloc_release_rate;
  if (rate <= 1e-6) {
    // Tiny release rate means that releasing is disabled.
    scavenge_counter_ = kDefaultReleaseDelay;
    return;
  }

  Length released_pages = ReleaseAtLeastNPages(1);

  if (released_pages == 0) {
    // Nothing to scavenge, delay for a while.
    scavenge_counter_ = kDefaultReleaseDelay;
  } else {
    // Compute how long to wait until we return memory.
    // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
    // after releasing one page.
    const double mult = 1000.0 / rate;
    double wait = mult * static_cast<double>(released_pages);
    if (wait > kMaxReleaseDelay) {
      // Avoid overflow and bound to reasonable range.
      wait = kMaxReleaseDelay;
    }
    scavenge_counter_ = static_cast<int64_t>(wait);
  }
}

Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
  Span* s = slist->normal.prev;
  ASSERT(s->location == Span::ON_NORMAL_FREELIST);
  RemoveFromFreeList(s);
  const Length n = s->length;
  TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                         static_cast<size_t>(s->length << kPageShift));
  s->location = Span::ON_RETURNED_FREELIST;
  MergeIntoFreeList(s);  // Coalesces if possible.
  return n;
}
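
// ReleaseAtLeastNPages() below walks the kMaxPages exact-size lists plus the
// large list in round-robin order, releasing the last normal span of each
// non-empty list it visits until the page target is met.  Because
// release_index_ is a member and persists across calls, successive scavenge
// passes resume where the previous one stopped instead of always starting
// from the smallest size classes.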

Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
  Length released_pages = 0;
  Length prev_released_pages = -1;

  // Round robin through the lists of free spans, releasing the last
  // span in each list.  Stop after releasing at least num_pages.
  while (released_pages < num_pages) {
    if (released_pages == prev_released_pages) {
      // Last iteration of while loop made no progress.
      break;
    }
    prev_released_pages = released_pages;

    for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
         i++, release_index_++) {
      if (release_index_ > kMaxPages) release_index_ = 0;
      SpanList* slist = (release_index_ == kMaxPages) ?
                        &large_ : &free_[release_index_];
      if (!DLL_IsEmpty(&slist->normal)) {
        Length released_len = ReleaseLastNormalSpan(slist);
        released_pages += released_len;
      }
    }
  }
  return released_pages;
}

void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(span->location == Span::IN_USE);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = sc;
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}

void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
  for (int s = 0; s < kMaxPages; s++) {
    result->normal_length[s] = DLL_Length(&free_[s].normal);
    result->returned_length[s] = DLL_Length(&free_[s].returned);
  }
}

void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
  result->spans = 0;
  result->normal_pages = 0;
  result->returned_pages = 0;
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    result->spans++;
    result->normal_pages += s->length;
  }
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    result->spans++;
    result->returned_pages += s->length;
  }
}
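
// In GetNextRange() below, the fraction reported for an in-use span carved
// into a small size class is (object size * refcount) / span bytes, where
// refcount is understood as the number of objects currently allocated from
// the span.  Hypothetical example: an 8192-byte span of 32-byte objects with
// refcount == 128 reports 32 * 128 / 8192 = 0.5, i.e. about half of the
// span's bytes are handed out.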

bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
  Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
  if (span == NULL) {
    return false;
  }
  r->address = span->start << kPageShift;
  r->length = span->length << kPageShift;
  r->fraction = 0;
  switch (span->location) {
    case Span::IN_USE:
      r->type = base::MallocRange::INUSE;
      r->fraction = 1;
      if (span->sizeclass > 0) {
        // Only some of the objects in this span may be in use.
        const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
        r->fraction = (1.0 * osize * span->refcount) / r->length;
      }
      break;
    case Span::ON_NORMAL_FREELIST:
      r->type = base::MallocRange::FREE;
      break;
    case Span::ON_RETURNED_FREELIST:
      r->type = base::MallocRange::UNMAPPED;
      break;
    default:
      r->type = base::MallocRange::UNKNOWN;
      break;
  }
  return true;
}

static void RecordGrowth(size_t growth) {
  StackTrace* t = Static::stacktrace_allocator()->New();
  t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
  t->size = growth;
  t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
  Static::set_growth_stacks(t);
}

bool PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n > kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;
  RecordGrowth(ask << kPageShift);

  uint64_t old_system_bytes = stats_.system_bytes;
  stats_.system_bytes += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a bunch of
  // memory for the page map.  This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to cause
    // any necessary coalescing to occur.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  return true;
}

bool PageHeap::CheckExpensive() {
  bool result = Check();
  CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
  CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
    CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
  }
  return result;
}

bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
                         int freelist) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}

}  // namespace tcmalloc