// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#ifndef TCMALLOC_PAGE_HEAP_H_
#define TCMALLOC_PAGE_HEAP_H_

#include <config.h>
#include <stddef.h>                     // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h>                     // for uint64_t, int64_t, uint16_t
#endif
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "common.h"
#include "packed-cache-inl.h"
#include "pagemap.h"
#include "span.h"

// We need to dllexport PageHeap just for the unittest.  MSVC complains
// that we don't dllexport the PageHeap members, but we don't need to
// test those, so I just suppress this warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip) (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}

namespace tcmalloc {

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// ...except...
// On Windows, we use TCMalloc_PageMap1_LazyCommit<> for 32-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
};

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
#ifdef WIN32
  // A flat map for 32-bit machines (with lazy commit of memory).
  typedef TCMalloc_PageMap1_LazyCommit<32-kPageShift> Type;
#else
  // A two-level map for 32-bit machines
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
#endif
  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};

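// Illustrative arithmetic (assumes 4K pages, i.e. kPageShift == 12, as the
// scavenging comments further below suggest): MapSelector<32>::Type must
// index 2^(32-12) = 2^20 pages, which a two-level (or lazily committed
// flat) map handles cheaply, while a 64-bit address space needs the deeper
// three-level TCMalloc_PageMap3<kAddressBits-kPageShift>.  PageHeap picks
// the pair for this build via its PageMap/PageMapCache typedefs below.
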
// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing a
// contiguous run of pages (called a "span").
// -------------------------------------------------------------------------

class PERFTOOLS_DLL_DECL PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]".
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);

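  // Illustrative usage sketch (not part of this header; "heap" stands in
  // for the process-wide PageHeap, which callers reach under tcmalloc's
  // global lock):
  //
  //   Span* span = heap->New(num_pages);   // num_pages > 0, pre-rounded
  //   if (span == NULL) return NULL;       // system is out of memory
  //   // ... use pages [span->start, span->start + span->length) ...
  //   heap->Delete(span);                  // return the whole run at once
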
  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);

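  // Illustrative sketch: carving a one-page head off a four-page span.
  // Both halves stay IN_USE, and only the original Span* shrinks in place;
  // the halves are then freed separately.
  //
  //   Span* head = heap->New(4);
  //   Span* tail = heap->Split(head, 1);   // head->length == 1,
  //                                        // tail->length == 3
  //   heap->Delete(tail);
  //   heap->Delete(head);
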
  // Return the descriptor for the specified page.  Returns NULL if
  // this PageID was not allocated previously.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // If this page heap is managing a range with starting page # >= start,
  // store info about the range in *r and return true.  Else return false.
  bool GetNextRange(PageID start, base::MallocRange* r);

  // Page heap statistics
  struct Stats {
    Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0),
              committed_bytes(0) {}
    uint64_t system_bytes;    // Total bytes allocated from system
    uint64_t free_bytes;      // Total bytes on normal freelists
    uint64_t unmapped_bytes;  // Total bytes on returned freelists
    uint64_t committed_bytes; // Bytes committed, always <= system_bytes.
  };
  inline Stats stats() const { return stats_; }

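  // Illustrative sketch: deriving idle vs. live byte counts from a stats()
  // snapshot (field names as declared above):
  //
  //   PageHeap::Stats s = heap->stats();
  //   uint64_t idle = s.free_bytes + s.unmapped_bytes;  // cached, not live
  //   uint64_t live = s.system_bytes - idle;            // handed to callers
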
  struct SmallSpanStats {
    // For each free list of small spans, the length (in spans) of the
    // normal and returned free lists for that size.
    int64 normal_length[kMaxPages];
    int64 returned_length[kMaxPages];
  };
  void GetSmallSpanStats(SmallSpanStats* result);

  // Stats for free large spans (i.e., spans with more than kMaxPages pages).
  struct LargeSpanStats {
    int64 spans;           // Number of such spans
    int64 normal_pages;    // Combined page length of normal large spans
    int64 returned_pages;  // Combined page length of unmapped spans
  };
  void GetLargeSpanStats(LargeSpanStats* result);

  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST

  // Try to release at least num_pages for reuse by the OS.  Returns
  // the actual number of pages released, which may be less than
  // num_pages if there weren't enough pages to release.  The result
  // may also be larger than num_pages since page_heap might decide to
  // release one large range instead of fragmenting it into two
  // smaller released and unreleased ranges.
  Length ReleaseAtLeastNPages(Length num_pages);

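  // Illustrative sketch (hypothetical caller): with 4K pages, a request of
  // 256 pages asks that at least 1MB be handed back to the OS.
  //
  //   Length released = heap->ReleaseAtLeastNPages(256);
  //   // "released" may come back smaller (free lists ran dry) or larger
  //   // (a big span was released whole rather than fragmented).
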
  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }

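  // Illustrative sketch of the lookup/fill pattern tcmalloc's free() fast
  // path builds around this cache:
  //
  //   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  //   size_t cl = heap->GetSizeClassIfCached(p);
  //   if (cl == 0) {                           // miss, or not a small span
  //     cl = heap->GetDescriptor(p)->sizeclass;
  //     heap->CacheSizeClass(p, cl);
  //   }
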
 private:
  // Allocates a big block of memory for the pagemap once we reach more than
  // 128MB
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  // REQUIRED: kMinSystemAlloc <= kMaxPages;
  static const int kMinSystemAlloc = kMaxPages;

  // Never delay scavenging for more than the following number of
  // deallocated pages.  The upstream default (1 << 20 pages) comes to 4GB
  // of deallocation with 4K pages; Chrome lowers it to 1 << 14 pages,
  // i.e. 64MB.
  static const int kMaxReleaseDelay = 1 << 14;

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  The upstream default (1 << 18 pages) comes to 1GB
  // of memory with 4K pages; Chrome lowers it to 1 << 12 pages, i.e. 16MB.
  static const int kDefaultReleaseDelay = 1 << 12;

  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<kAddressBits>::Type PageMap;
  typedef MapSelector<kAddressBits>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Statistics on system, free, and unmapped bytes
  Stats stats_;

  Span* SearchFreeAndLargeLists(Length n);

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease free_pages_ by n and
  // return span.
  Span* Carve(Span* span, Length n);

  // Record "span" in the pagemap: the first and last pages of the span
  // point back at its descriptor, which is enough for GetDescriptor()
  // lookups and for coalescing with neighboring spans.
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Coalesce span with neighboring spans if possible, prepend to
  // appropriate free list, and adjust stats.
  void MergeIntoFreeList(Span* span);

  // Commit the span.
  void CommitSpan(Span* span);

  // Decommit the span.
  void DecommitSpan(Span* span);

  // Prepends span to appropriate free list, and adjusts stats.
  void PrependToFreeList(Span* span);

  // Removes span from its free list, and adjusts stats.
  void RemoveFromFreeList(Span* span);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Release the last span on the normal portion of this list.
  // Return the length of that span.
  Length ReleaseLastNormalSpan(SpanList* slist);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list where we released memory to the OS.
  int release_index_;
};

}  // namespace tcmalloc

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TCMALLOC_PAGE_HEAP_H_