2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "wtf/PageAllocator.h"

#include "wtf/AddressSpaceRandomization.h"
#include "wtf/Assertions.h"

#include <limits.h>

#if OS(POSIX)

#include <sys/mman.h>

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#elif OS(WIN)

#include <windows.h>

#else
#error Unknown OS
#endif // OS(POSIX)
61 // This simple internal function wraps the OS-specific page allocation call so
62 // that it behaves consistently: the address is a hint and if it cannot be used,
63 // the allocation will be placed elsewhere.
64 static void* systemAllocPages(void* addr
, size_t len
, PageAccessibilityConfiguration pageAccessibility
)
66 ASSERT(!(len
& kPageAllocationGranularityOffsetMask
));
67 ASSERT(!(reinterpret_cast<uintptr_t>(addr
) & kPageAllocationGranularityOffsetMask
));
70 int accessFlag
= pageAccessibility
== PageAccessible
? PAGE_READWRITE
: PAGE_NOACCESS
;
71 ret
= VirtualAlloc(addr
, len
, MEM_RESERVE
| MEM_COMMIT
, accessFlag
);
73 ret
= VirtualAlloc(0, len
, MEM_RESERVE
| MEM_COMMIT
, accessFlag
);
75 int accessFlag
= pageAccessibility
== PageAccessible
? (PROT_READ
| PROT_WRITE
) : PROT_NONE
;
76 ret
= mmap(addr
, len
, accessFlag
, MAP_ANONYMOUS
| MAP_PRIVATE
, -1, 0);
77 if (ret
== MAP_FAILED
)
83 static bool trimMapping(void* baseAddr
, size_t baseLen
, void* trimAddr
, size_t trimLen
)
88 char* basePtr
= static_cast<char*>(baseAddr
);
89 char* trimPtr
= static_cast<char*>(trimAddr
);
90 ASSERT(trimPtr
>= basePtr
);
91 ASSERT(trimPtr
+ trimLen
<= basePtr
+ baseLen
);
92 size_t preLen
= trimPtr
- basePtr
;
94 int ret
= munmap(basePtr
, preLen
);
97 size_t postLen
= (basePtr
+ baseLen
) - (trimPtr
+ trimLen
);
99 int ret
= munmap(trimPtr
+ trimLen
, postLen
);
100 RELEASE_ASSERT(!ret
);
106 void* allocPages(void* addr
, size_t len
, size_t align
, PageAccessibilityConfiguration pageAccessibility
)
108 ASSERT(len
>= kPageAllocationGranularity
);
109 ASSERT(!(len
& kPageAllocationGranularityOffsetMask
));
110 ASSERT(align
>= kPageAllocationGranularity
);
111 ASSERT(!(align
& kPageAllocationGranularityOffsetMask
));
112 ASSERT(!(reinterpret_cast<uintptr_t>(addr
) & kPageAllocationGranularityOffsetMask
));
113 size_t alignOffsetMask
= align
- 1;
114 size_t alignBaseMask
= ~alignOffsetMask
;
115 ASSERT(!(reinterpret_cast<uintptr_t>(addr
) & alignOffsetMask
));
116 // If the client passed null as the address, choose a good one.
118 addr
= getRandomPageBase();
119 addr
= reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr
) & alignBaseMask
);
122 // The common case, which is also the least work we can do, is that the
123 // address and length are suitable. Just try it.
124 void* ret
= systemAllocPages(addr
, len
, pageAccessibility
);
125 // If the alignment is to our liking, we're done.
126 if (!ret
|| !(reinterpret_cast<uintptr_t>(ret
) & alignOffsetMask
))
129 // Annoying. Unmap and map a larger range to be sure to succeed on the
130 // second, slower attempt.
133 size_t tryLen
= len
+ (align
- kPageAllocationGranularity
);
134 RELEASE_ASSERT(tryLen
> len
);
136 // We loop to cater for the unlikely case where another thread maps on top
137 // of the aligned location we choose.
139 while (count
++ < 100) {
140 ret
= systemAllocPages(addr
, tryLen
, pageAccessibility
);
143 // We can now try and trim out a subset of the mapping.
144 addr
= reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret
) + alignOffsetMask
) & alignBaseMask
);
146 // On POSIX systems, we can trim the oversized mapping to fit exactly.
147 // This will always work on POSIX systems.
148 if (trimMapping(ret
, tryLen
, addr
, len
))
151 // On Windows, you can't trim an existing mapping so we unmap and remap
152 // a subset. We used to do for all platforms, but OSX 10.8 has a
153 // broken mmap() that ignores address hints for valid, unused addresses.
154 freePages(ret
, tryLen
);
155 ret
= systemAllocPages(addr
, len
, pageAccessibility
);
156 if (ret
== addr
|| !ret
)
159 // Unlikely race / collision. Do the simple thing and just start again.
161 addr
= getRandomPageBase();
162 addr
= reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr
) & alignBaseMask
);
168 void freePages(void* addr
, size_t len
)
170 ASSERT(!(reinterpret_cast<uintptr_t>(addr
) & kPageAllocationGranularityOffsetMask
));
171 ASSERT(!(len
& kPageAllocationGranularityOffsetMask
));
173 int ret
= munmap(addr
, len
);
174 RELEASE_ASSERT(!ret
);
176 BOOL ret
= VirtualFree(addr
, 0, MEM_RELEASE
);
181 void setSystemPagesInaccessible(void* addr
, size_t len
)
183 ASSERT(!(len
& kSystemPageOffsetMask
));
185 int ret
= mprotect(addr
, len
, PROT_NONE
);
186 RELEASE_ASSERT(!ret
);
188 BOOL ret
= VirtualFree(addr
, len
, MEM_DECOMMIT
);
193 bool setSystemPagesAccessible(void* addr
, size_t len
)
195 ASSERT(!(len
& kSystemPageOffsetMask
));
197 return !mprotect(addr
, len
, PROT_READ
| PROT_WRITE
);
199 return !!VirtualAlloc(addr
, len
, MEM_COMMIT
, PAGE_READWRITE
);
203 void decommitSystemPages(void* addr
, size_t len
)
205 ASSERT(!(len
& kSystemPageOffsetMask
));
207 int ret
= madvise(addr
, len
, MADV_FREE
);
208 RELEASE_ASSERT(!ret
);
210 setSystemPagesInaccessible(addr
, len
);
214 void recommitSystemPages(void* addr
, size_t len
)
216 ASSERT(!(len
& kSystemPageOffsetMask
));
220 RELEASE_ASSERT(setSystemPagesAccessible(addr
, len
));
224 void discardSystemPages(void* addr
, size_t len
)
226 ASSERT(!(len
& kSystemPageOffsetMask
));
228 // On POSIX, the implementation detail is that discard and decommit are the
229 // same, and lead to pages that are returned to the system immediately and
230 // get replaced with zeroed pages when touched. So we just call
231 // decommitSystemPages() here to avoid code duplication.
232 decommitSystemPages(addr
, len
);
236 // TODO(cevans): implement this using MEM_RESET for Windows, once we've
237 // decided that the semantics are a match.