// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#ifndef TCMALLOC_PAGE_HEAP_H_
#define TCMALLOC_PAGE_HEAP_H_

#include <config.h>
#include <stddef.h>                     // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h>                     // for uint64_t, int64_t, uint16_t
#endif
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "common.h"
#include "packed-cache-inl.h"
#include "pagemap.h"
#include "span.h"

// We need to dllexport PageHeap just for the unittest.  MSVC complains
// that we don't dllexport the PageHeap members, but we don't need to
// test those, so I just suppress this warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}

namespace tcmalloc {

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
};

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};
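
// Illustrative instantiations (a sketch only: the concrete parameters depend
// on kAddressBits and kPageShift from common.h; the "4K pages" figure used by
// the scavenging comments below corresponds to kPageShift == 12):
//   MapSelector<32>::Type is TCMalloc_PageMap2<20>   // two-level radix map
//   MapSelector<48>::Type is TCMalloc_PageMap3<36>   // three-level radix map
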
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing a
// contiguous run of pages (called a "span").
// -------------------------------------------------------------------------

class PERFTOOLS_DLL_DECL PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);
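
  // Illustrative lifecycle (hypothetical caller; within tcmalloc these calls
  // are made with the page-heap lock held):
  //
  //   Span* span = heap->New(n);            // allocate a run of n pages
  //   if (span != NULL) {
  //     heap->RegisterSizeClass(span, cl);  // span now backs size-class cl
  //     ...
  //     heap->Delete(span);                 // return the whole run
  //   }
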
  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);
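
  // For example (a sketch, assuming "span" is IN_USE with sizeclass 0 and
  // length 8):
  //
  //   Span* rest = heap->Split(span, 3);
  //   // span now covers its first 3 pages; rest covers the remaining 5.
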
  // Return the descriptor for the specified page.  Returns NULL if
  // this PageID was not allocated previously.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // If this page heap is managing a range with starting page # >= start,
  // store info about the range in *r and return true.  Else return false.
  bool GetNextRange(PageID start, base::MallocRange* r);
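
  // A sketch of walking every managed range (base::MallocRange is declared
  // in gperftools/malloc_extension.h; its address/length fields are assumed
  // from that header):
  //
  //   base::MallocRange r;
  //   uintptr_t address = 0;
  //   while (heap->GetNextRange(address >> kPageShift, &r)) {
  //     ...  // examine r.type, r.address, r.length
  //     address = r.address + r.length;
  //   }
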
  // Page heap statistics
  struct Stats {
    Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0) {}
    uint64_t system_bytes;    // Total bytes allocated from system
    uint64_t free_bytes;      // Total bytes on normal freelists
    uint64_t unmapped_bytes;  // Total bytes on returned freelists
  };
  inline Stats stats() const { return stats_; }
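
  // For instance, a caller could estimate how many bytes are still mapped
  // (sketch; "heap" is a hypothetical PageHeap*):
  //
  //   PageHeap::Stats s = heap->stats();
  //   uint64_t mapped_bytes = s.system_bytes - s.unmapped_bytes;
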
  struct SmallSpanStats {
    // For each free list of small spans, the length (in spans) of the
    // normal and returned free lists for that size.
    int64 normal_length[kMaxPages];
    int64 returned_length[kMaxPages];
  };
  void GetSmallSpanStats(SmallSpanStats* result);

  // Stats for free large spans (i.e., spans with more than kMaxPages pages).
  struct LargeSpanStats {
    int64 spans;           // Number of such spans
    int64 normal_pages;    // Combined page length of normal large spans
    int64 returned_pages;  // Combined page length of unmapped spans
  };
  void GetLargeSpanStats(LargeSpanStats* result);

  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST

  // Try to release at least num_pages for reuse by the OS.  Returns
  // the actual number of pages released, which may be less than
  // num_pages if there weren't enough pages to release.  The result
  // may also be larger than num_pages since page_heap might decide to
  // release one large range instead of fragmenting it into two
  // smaller released and unreleased ranges.
  Length ReleaseAtLeastNPages(Length num_pages);
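
  // For example, shedding roughly 1MB with 4K pages would look like this
  // (a sketch; the returned count may differ as described above):
  //
  //   Length released = heap->ReleaseAtLeastNPages(256);  // 256 * 4KB = 1MB
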
  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
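
  // Typical fast-path use when classifying a pointer (a sketch of the kind
  // of caller that lives in tcmalloc.cc):
  //
  //   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  //   size_t cl = heap->GetSizeClassIfCached(p);
  //   if (cl == 0) {
  //     // Cache miss: consult the full pagemap via GetDescriptor(p).
  //   }
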
 private:
  // Allocates a big block of memory for the pagemap once we reach more than
  // 128MB
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  // REQUIRED: kMinSystemAlloc <= kMaxPages;
  static const int kMinSystemAlloc = kMaxPages;

  // Never delay scavenging for more than the following number of
  // deallocated pages.  With 4K pages, this comes to 4GB of
  // deallocation.
  static const int kMaxReleaseDelay = 1 << 20;

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  With 4K pages, this comes to 1GB of memory.
  static const int kDefaultReleaseDelay = 1 << 18;

  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<kAddressBits>::Type PageMap;
  typedef MapSelector<kAddressBits>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Statistics on system, free, and unmapped bytes
  Stats stats_;

  Span* SearchFreeAndLargeLists(Length n);

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease free_pages_ by n and
  // return span.
  Span* Carve(Span* span, Length n);

  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }
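
  // Note that only a span's boundary pages are recorded here: coalescing in
  // MergeIntoFreeList probes just the pages adjacent to a span's endpoints,
  // so interior pages do not need pagemap entries at this point.
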
  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Coalesce span with neighboring spans if possible, prepend to
  // appropriate free list, and adjust stats.
  void MergeIntoFreeList(Span* span);

  // Prepends span to appropriate free list, and adjusts stats.
  void PrependToFreeList(Span* span);

  // Removes span from its free list, and adjusts stats.
  void RemoveFromFreeList(Span* span);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Release the last span on the normal portion of this list.
  // Return the length of that span.
  Length ReleaseLastNormalSpan(SpanList* slist);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list where we released memory to the OS.
  int release_index_;
};

}  // namespace tcmalloc

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TCMALLOC_PAGE_HEAP_H_