Refactor FacetManager into a separate file.
[chromium-blink-merge.git] / base / security_unittest.cc
blobf4c60a75db7291df8f4d51b8a495d48e6915a34d
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <fcntl.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <sys/stat.h>
10 #include <sys/types.h>
12 #include <algorithm>
13 #include <limits>
15 #include "base/files/file_util.h"
16 #include "base/logging.h"
17 #include "base/memory/scoped_ptr.h"
18 #include "build/build_config.h"
19 #include "testing/gtest/include/gtest/gtest.h"
21 #if defined(OS_POSIX)
22 #include <sys/mman.h>
23 #include <unistd.h>
24 #endif
26 #if defined(OS_WIN)
27 #include <new.h>
28 #endif
30 using std::nothrow;
31 using std::numeric_limits;
33 namespace {
35 #if defined(OS_WIN)
36 // This is a permitted size but exhausts memory pretty quickly.
37 const size_t kLargePermittedAllocation = 0x7FFFE000;
39 int OnNoMemory(size_t) {
40 _exit(1);
43 void ExhaustMemoryWithMalloc() {
44 for (;;) {
45 // Without the |volatile|, clang optimizes away the allocation.
46 void* volatile buf = malloc(kLargePermittedAllocation);
47 if (!buf)
48 break;
52 void ExhaustMemoryWithRealloc() {
53 size_t size = kLargePermittedAllocation;
54 void* buf = malloc(size);
55 if (!buf)
56 return;
57 for (;;) {
58 size += kLargePermittedAllocation;
59 void* new_buf = realloc(buf, size);
60 if (!buf)
61 break;
62 buf = new_buf;
65 #endif
// Compiler optimization barrier. It prevents the compiler from treating an
// expression as a compile-time constant, and from discarding return values
// we actually want evaluated (see the comment near calloc below).
template <typename Type>
Type HideValueFromCompiler(volatile Type value) {
#if defined(__GNUC__)
  // On GCC-compatible compilers (GCC or Clang), the empty asm claims to
  // read and write |value|, which is a stronger barrier than |volatile|
  // alone.
  __asm__ volatile ("" : "+r" (value));
#endif  // __GNUC__
  return value;
}
// Tcmalloc and Windows allocator shim support setting malloc limits.
// - NO_TCMALLOC (should be defined if compiled with use_allocator!="tcmalloc")
// - ADDRESS_SANITIZER and SYZYASAN because they have their own memory allocator
// - IOS does not use tcmalloc
// - OS_MACOSX does not use tcmalloc
// - Windows allocator shim defines ALLOCATOR_SHIM
#if (!defined(NO_TCMALLOC) || defined(ALLOCATOR_SHIM)) && \
    !defined(ADDRESS_SANITIZER) && !defined(OS_IOS) && !defined(OS_MACOSX) && \
    !defined(SYZYASAN)
#define MALLOC_OVERFLOW_TEST(function) function
#else
#define MALLOC_OVERFLOW_TEST(function) DISABLED_##function
#endif

// TODO(jln): switch to std::numeric_limits<int>::max() when we switch to
// C++11.
const size_t kTooBigAllocSize = INT_MAX;
// Detect runtime TCMalloc bypasses.
bool IsTcMallocBypassed() {
#if defined(OS_LINUX)
  // Valgrind bypasses TCMalloc when G_SLICE=always-malloc is set.
  const char* g_slice = getenv("G_SLICE");
  return g_slice != NULL && strcmp(g_slice, "always-malloc") == 0;
#else
  return false;
#endif
}
// Whether calloc() terminates the process on OOM instead of returning NULL.
bool CallocDiesOnOOM() {
  // The sanitizers' calloc dies on OOM instead of returning NULL.
  // The wrapper function in base/process_util_linux.cc that is used when we
  // compile without TCMalloc will just die on OOM instead of returning NULL.
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || \
    (defined(OS_LINUX) && defined(NO_TCMALLOC))
  return true;
#else
  return false;
#endif
}
124 // Fake test that allow to know the state of TCMalloc by looking at bots.
125 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(IsTCMallocDynamicallyBypassed)) {
126 printf("Malloc is dynamically bypassed: %s\n",
127 IsTcMallocBypassed() ? "yes." : "no.");
130 // The MemoryAllocationRestrictions* tests test that we can not allocate a
131 // memory range that cannot be indexed via an int. This is used to mitigate
132 // vulnerabilities in libraries that use int instead of size_t. See
133 // crbug.com/169327.
135 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsMalloc)) {
136 if (!IsTcMallocBypassed()) {
137 scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
138 HideValueFromCompiler(malloc(kTooBigAllocSize))));
139 ASSERT_TRUE(!ptr);
143 #if defined(GTEST_HAS_DEATH_TEST) && defined(OS_WIN)
144 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationMallocDeathTest)) {
145 _set_new_handler(&OnNoMemory);
146 _set_new_mode(1);
148 scoped_ptr<char, base::FreeDeleter> ptr;
149 EXPECT_DEATH(ptr.reset(static_cast<char*>(
150 HideValueFromCompiler(malloc(kTooBigAllocSize)))),
151 "");
152 ASSERT_TRUE(!ptr);
154 _set_new_handler(NULL);
155 _set_new_mode(0);
158 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationExhaustDeathTest)) {
159 _set_new_handler(&OnNoMemory);
160 _set_new_mode(1);
162 ASSERT_DEATH(ExhaustMemoryWithMalloc(), "");
164 _set_new_handler(NULL);
165 _set_new_mode(0);
168 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryReallocationExhaustDeathTest)) {
169 _set_new_handler(&OnNoMemory);
170 _set_new_mode(1);
172 ASSERT_DEATH(ExhaustMemoryWithRealloc(), "");
174 _set_new_handler(NULL);
175 _set_new_mode(0);
177 #endif
179 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsCalloc)) {
180 if (!IsTcMallocBypassed()) {
181 scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
182 HideValueFromCompiler(calloc(kTooBigAllocSize, 1))));
183 ASSERT_TRUE(!ptr);
187 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsRealloc)) {
188 if (!IsTcMallocBypassed()) {
189 char* orig_ptr = static_cast<char*>(malloc(1));
190 ASSERT_TRUE(orig_ptr);
191 scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
192 HideValueFromCompiler(realloc(orig_ptr, kTooBigAllocSize))));
193 ASSERT_TRUE(!ptr);
194 // If realloc() did not succeed, we need to free orig_ptr.
195 free(orig_ptr);
199 typedef struct {
200 char large_array[kTooBigAllocSize];
201 } VeryLargeStruct;
203 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsNew)) {
204 if (!IsTcMallocBypassed()) {
205 scoped_ptr<VeryLargeStruct> ptr(
206 HideValueFromCompiler(new (nothrow) VeryLargeStruct));
207 ASSERT_TRUE(!ptr);
#if defined(GTEST_HAS_DEATH_TEST) && defined(OS_WIN)
TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationNewDeathTest)) {
  // With the handler installed, the failing new must terminate the process.
  _set_new_handler(&OnNoMemory);

  scoped_ptr<VeryLargeStruct> ptr;
  EXPECT_DEATH(
      ptr.reset(HideValueFromCompiler(new (nothrow) VeryLargeStruct)), "");
  ASSERT_TRUE(!ptr);

  _set_new_handler(NULL);
}
#endif
224 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsNewArray)) {
225 if (!IsTcMallocBypassed()) {
226 scoped_ptr<char[]> ptr(
227 HideValueFromCompiler(new (nothrow) char[kTooBigAllocSize]));
228 ASSERT_TRUE(!ptr);
232 // The tests bellow check for overflows in new[] and calloc().
234 // There are platforms where these tests are known to fail. We would like to
235 // be able to easily check the status on the bots, but marking tests as
236 // FAILS_ is too clunky.
237 void OverflowTestsSoftExpectTrue(bool overflow_detected) {
238 if (!overflow_detected) {
239 #if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_MACOSX)
240 // Sadly, on Linux, Android, and OSX we don't have a good story yet. Don't
241 // fail the test, but report.
242 printf("Platform has overflow: %s\n",
243 !overflow_detected ? "yes." : "no.");
244 #else
245 // Otherwise, fail the test. (Note: EXPECT are ok in subfunctions, ASSERT
246 // aren't).
247 EXPECT_TRUE(overflow_detected);
248 #endif
252 #if defined(OS_IOS) || defined(OS_WIN) || defined(THREAD_SANITIZER) || defined(OS_MACOSX)
253 #define MAYBE_NewOverflow DISABLED_NewOverflow
254 #else
255 #define MAYBE_NewOverflow NewOverflow
256 #endif
257 // Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
258 // IOS doesn't honor nothrow, so disable the test there.
259 // Crashes on Windows Dbg builds, disable there as well.
260 // Fails on Mac 10.8 http://crbug.com/227092
261 TEST(SecurityTest, MAYBE_NewOverflow) {
262 const size_t kArraySize = 4096;
263 // We want something "dynamic" here, so that the compiler doesn't
264 // immediately reject crazy arrays.
265 const size_t kDynamicArraySize = HideValueFromCompiler(kArraySize);
266 // numeric_limits are still not constexpr until we switch to C++11, so we
267 // use an ugly cast.
268 const size_t kMaxSizeT = ~static_cast<size_t>(0);
269 ASSERT_EQ(numeric_limits<size_t>::max(), kMaxSizeT);
270 const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
271 const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
273 scoped_ptr<char[][kArraySize]> array_pointer(new (nothrow)
274 char[kDynamicArraySize2][kArraySize]);
275 OverflowTestsSoftExpectTrue(!array_pointer);
277 // On windows, the compiler prevents static array sizes of more than
278 // 0x7fffffff (error C2148).
279 #if defined(OS_WIN) && defined(ARCH_CPU_64_BITS)
280 ALLOW_UNUSED_LOCAL(kDynamicArraySize);
281 #else
283 scoped_ptr<char[][kArraySize2]> array_pointer(new (nothrow)
284 char[kDynamicArraySize][kArraySize2]);
285 OverflowTestsSoftExpectTrue(!array_pointer);
287 #endif // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
290 // Call calloc(), eventually free the memory and return whether or not
291 // calloc() did succeed.
292 bool CallocReturnsNull(size_t nmemb, size_t size) {
293 scoped_ptr<char, base::FreeDeleter> array_pointer(
294 static_cast<char*>(calloc(nmemb, size)));
295 // We need the call to HideValueFromCompiler(): we have seen LLVM
296 // optimize away the call to calloc() entirely and assume
297 // the pointer to not be NULL.
298 return HideValueFromCompiler(array_pointer.get()) == NULL;
301 // Test if calloc() can overflow.
302 TEST(SecurityTest, CallocOverflow) {
303 const size_t kArraySize = 4096;
304 const size_t kMaxSizeT = numeric_limits<size_t>::max();
305 const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
306 if (!CallocDiesOnOOM()) {
307 EXPECT_TRUE(CallocReturnsNull(kArraySize, kArraySize2));
308 EXPECT_TRUE(CallocReturnsNull(kArraySize2, kArraySize));
309 } else {
310 // It's also ok for calloc to just terminate the process.
311 #if defined(GTEST_HAS_DEATH_TEST)
312 EXPECT_DEATH(CallocReturnsNull(kArraySize, kArraySize2), "");
313 EXPECT_DEATH(CallocReturnsNull(kArraySize2, kArraySize), "");
314 #endif // GTEST_HAS_DEATH_TEST
318 #if defined(OS_LINUX) && defined(__x86_64__)
319 // Check if ptr1 and ptr2 are separated by less than size chars.
320 bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
321 ptrdiff_t ptr_diff = reinterpret_cast<char*>(std::max(ptr1, ptr2)) -
322 reinterpret_cast<char*>(std::min(ptr1, ptr2));
323 return static_cast<size_t>(ptr_diff) <= size;
326 // Check if TCMalloc uses an underlying random memory allocator.
327 TEST(SecurityTest, MALLOC_OVERFLOW_TEST(RandomMemoryAllocations)) {
328 if (IsTcMallocBypassed())
329 return;
330 size_t kPageSize = 4096; // We support x86_64 only.
331 // Check that malloc() returns an address that is neither the kernel's
332 // un-hinted mmap area, nor the current brk() area. The first malloc() may
333 // not be at a random address because TCMalloc will first exhaust any memory
334 // that it has allocated early on, before starting the sophisticated
335 // allocators.
336 void* default_mmap_heap_address =
337 mmap(0, kPageSize, PROT_READ|PROT_WRITE,
338 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
339 ASSERT_NE(default_mmap_heap_address,
340 static_cast<void*>(MAP_FAILED));
341 ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
342 void* brk_heap_address = sbrk(0);
343 ASSERT_NE(brk_heap_address, reinterpret_cast<void*>(-1));
344 ASSERT_TRUE(brk_heap_address != NULL);
345 // 1 MB should get us past what TCMalloc pre-allocated before initializing
346 // the sophisticated allocators.
347 size_t kAllocSize = 1<<20;
348 scoped_ptr<char, base::FreeDeleter> ptr(
349 static_cast<char*>(malloc(kAllocSize)));
350 ASSERT_TRUE(ptr != NULL);
351 // If two pointers are separated by less than 512MB, they are considered
352 // to be in the same area.
353 // Our random pointer could be anywhere within 0x3fffffffffff (46bits),
354 // and we are checking that it's not withing 1GB (30 bits) from two
355 // addresses (brk and mmap heap). We have roughly one chance out of
356 // 2^15 to flake.
357 const size_t kAreaRadius = 1<<29;
358 bool in_default_mmap_heap = ArePointersToSameArea(
359 ptr.get(), default_mmap_heap_address, kAreaRadius);
360 EXPECT_FALSE(in_default_mmap_heap);
362 bool in_default_brk_heap = ArePointersToSameArea(
363 ptr.get(), brk_heap_address, kAreaRadius);
364 EXPECT_FALSE(in_default_brk_heap);
366 // In the implementation, we always mask our random addresses with
367 // kRandomMask, so we use it as an additional detection mechanism.
368 const uintptr_t kRandomMask = 0x3fffffffffffULL;
369 bool impossible_random_address =
370 reinterpret_cast<uintptr_t>(ptr.get()) & ~kRandomMask;
371 EXPECT_FALSE(impossible_random_address);
374 #endif // defined(OS_LINUX) && defined(__x86_64__)
376 } // namespace