// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/process_util.h"

#import <Cocoa/Cocoa.h>
#include <crt_externs.h>
#include <dlfcn.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <mach/task.h>
#include <mach-o/dyld.h>
#include <mach-o/nlist.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <new>
#include <string>
#include "base/debug/debugger.h"
#include "base/eintr_wrapper.h"
#include "base/hash_tables.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/string_util.h"
#include "base/sys_info.h"
#include "base/sys_string_conversions.h"
#include "base/time.h"
#include "build/build_config.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"
#include "third_party/mach_override/mach_override.h"
void RestoreDefaultExceptionHandler() {
  // This function is tailored to remove the Breakpad exception handler.
  // exception_mask matches s_exception_mask in
  // breakpad/src/client/mac/handler/exception_handler.cc
  const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
                                          EXC_MASK_BAD_INSTRUCTION |
                                          EXC_MASK_ARITHMETIC |
                                          EXC_MASK_BREAKPOINT;

  // Setting the exception port to MACH_PORT_NULL may not be entirely
  // kosher to restore the default exception handler, but in practice,
  // it results in the exception port being set to Apple Crash Reporter,
  // the desired behavior.
  task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
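
// Illustrative usage (not from the original file): a helper process that
// inherited Breakpad's exception ports but wants crashes routed back to
// Apple Crash Reporter would call this once at startup:
//
//   RestoreDefaultExceptionHandler();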
ProcessIterator::ProcessIterator(const ProcessFilter* filter)
    : index_of_kinfo_proc_(0),
      filter_(filter) {
  // Get a snapshot of all of my processes (yes, as we loop it can go stale,
  // but trying to find where we were in a constantly changing list is
  // basically impossible).

  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };
  // Since more processes could start between when we get the size and when
  // we get the list, we do a loop to keep trying until we get it.
  bool done = false;
  int try_num = 1;
  const int max_tries = 10;
  do {
    // Get the size of the buffer.
    size_t len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to get the size needed for the process list";
      kinfo_procs_.resize(0);
      done = true;
    } else {
      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
      // Leave some spare room for process table growth (more could show up
      // between when we check and now).
      num_of_kinfo_proc += 16;
      kinfo_procs_.resize(num_of_kinfo_proc);
      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
      // Load the list of processes.
      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
        // If we get a mem error, it just means we need a bigger buffer, so
        // loop around again. Anything else is a real error, so give up.
        if (errno != ENOMEM) {
          LOG(ERROR) << "failed to get the process list";
          kinfo_procs_.resize(0);
          done = true;
        }
      } else {
        // Got the list, just make sure we're sized exactly right.
        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
        kinfo_procs_.resize(num_of_kinfo_proc);
        done = true;
      }
    }
  } while (!done && (try_num++ < max_tries));

  if (!done) {
    LOG(ERROR) << "failed to collect the process list in a few tries";
    kinfo_procs_.resize(0);
  }
}
ProcessIterator::~ProcessIterator() {
}
bool ProcessIterator::CheckForNextProcess() {
  std::string data;
  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];

    // Skip processes just awaiting collection.
    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
      continue;
    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };

    // Find out what size buffer we need.
    size_t data_len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to figure out the buffer size for a commandline";
      continue;
    }

    data.resize(data_len);
    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to fetch a commandline";
      continue;
    }
    // |data| contains all the command line parameters of the process,
    // separated by blocks of one or more null characters. We tokenize |data|
    // into a vector of strings using '\0' as a delimiter and populate
    // |entry_.cmd_line_args_|.
    std::string delimiters;
    delimiters.push_back('\0');
    Tokenize(data, delimiters, &entry_.cmd_line_args_);
    // |data| starts with the full executable path followed by a null
    // character. We search for the first instance of '\0' and extract
    // everything before it to populate |entry_.exe_file_|.
    size_t exec_name_end = data.find('\0');
    if (exec_name_end == std::string::npos) {
      LOG(ERROR) << "command line data didn't match expected format";
      continue;
    }
    entry_.pid_ = kinfo.kp_proc.p_pid;
    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
    entry_.gid_ = kinfo.kp_eproc.e_pgid;
    size_t last_slash = data.rfind('/', exec_name_end);
    if (last_slash == std::string::npos)
      entry_.exe_file_.assign(data, 0, exec_name_end);
    else
      entry_.exe_file_.assign(data, last_slash + 1,
                              exec_name_end - last_slash - 1);
    // Start with the next entry next time through.
    ++index_of_kinfo_proc_;
    return true;
  }
  return false;
}
bool NamedProcessIterator::IncludeEntry() {
  return (executable_name_ == entry().exe_file() &&
          ProcessIterator::IncludeEntry());
}
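
// Illustrative usage (not from the original file): iterate all of this
// user's processes whose executable matches |executable_name|, a
// hypothetical caller-supplied name:
//
//   NamedProcessIterator iter(executable_name, NULL /* no filter */);
//   while (const ProcessEntry* entry = iter.NextProcessEntry()) {
//     VLOG(1) << "pid " << entry->pid() << ": " << entry->exe_file();
//   }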
// ------------------------------------------------------------------------
// NOTE: about ProcessMetrics
//
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to collect these stats
// for arbitrary processes (and spinning up ps to fetch each stat seems too
// dangerous to put in a base API for anyone to call). Child processes IPC
// their mach task port to us, so return something if available, otherwise
// return 0.
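
// As an illustrative sketch (not part of this file), a PortProvider that
// serves task ports received from children over IPC might look like:
//
//   class ChildPortProvider : public base::ProcessMetrics::PortProvider {
//    public:
//     virtual mach_port_t TaskForPid(base::ProcessHandle process) const {
//       // |port_map_| is a hypothetical pid -> port map populated when a
//       // child sends its task port.
//       std::map<base::ProcessHandle, mach_port_t>::const_iterator it =
//           port_map_.find(process);
//       return it == port_map_.end() ? MACH_PORT_NULL : it->second;
//     }
//
//    private:
//     std::map<base::ProcessHandle, mach_port_t> port_map_;
//   };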
ProcessMetrics::ProcessMetrics(ProcessHandle process,
                               ProcessMetrics::PortProvider* port_provider)
    : process_(process),
      last_time_(0),
      last_system_time_(0),
      port_provider_(port_provider) {
  processor_count_ = SysInfo::NumberOfProcessors();
}
// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
    ProcessHandle process,
    ProcessMetrics::PortProvider* port_provider) {
  return new ProcessMetrics(process, port_provider);
}
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
  return false;
}
static bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
  if (task == MACH_PORT_NULL)
    return false;
  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
  kern_return_t kr = task_info(task,
                               TASK_BASIC_INFO_64,
                               reinterpret_cast<task_info_t>(task_info_data),
                               &count);
  // Most likely cause for failure: |task| is a zombie.
  return kr == KERN_SUCCESS;
}
size_t ProcessMetrics::GetPagefileUsage() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.virtual_size;
}
size_t ProcessMetrics::GetPeakPagefileUsage() const {
  return 0;
}
size_t ProcessMetrics::GetWorkingSetSize() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.resident_size;
}
size_t ProcessMetrics::GetPeakWorkingSetSize() const {
  return 0;
}
static bool GetCPUTypeForProcess(pid_t pid, cpu_type_t* cpu_type) {
  size_t len = sizeof(*cpu_type);
  int result = sysctlbyname("sysctl.proc_cputype",
                            cpu_type,
                            &len,
                            NULL,
                            0);
  if (result != 0) {
    PLOG(ERROR) << "sysctlbyname(\"sysctl.proc_cputype\")";
    return false;
  }
  return true;
}
static bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
  if (type == CPU_TYPE_I386)
    return addr >= SHARED_REGION_BASE_I386 &&
           addr < (SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386);
  else if (type == CPU_TYPE_X86_64)
    return addr >= SHARED_REGION_BASE_X86_64 &&
           addr < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
  else
    return false;
}
// This is a rough approximation of the algorithm that libtop uses.
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
                                    size_t* shared_bytes) {
  kern_return_t kr;
  size_t private_pages_count = 0;
  size_t shared_pages_count = 0;

  if (!private_bytes && !shared_bytes)
    return true;

  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL) {
    LOG(ERROR) << "Invalid process";
    return false;
  }

  cpu_type_t cpu_type;
  if (!GetCPUTypeForProcess(process_, &cpu_type))
    return false;
  // The same region can be referenced multiple times. To avoid double
  // counting we need to keep track of which regions we've already counted.
  base::hash_set<int> seen_objects;

  // We iterate through each VM region in the task's address map. For shared
  // memory we add up all the pages that are marked as shared. Like libtop we
  // try to avoid counting pages that are also referenced by other tasks.
  // Since we don't have access to the VM regions of other tasks the only
  // hint we have is if the address is in the shared region area.
  //
  // Private memory is much simpler. We simply count the pages that are marked
  // as private or copy on write (COW).
  //
  // See libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-67/libtop.c
  mach_vm_size_t size = 0;
  for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
    mach_port_t object_name;
    kr = mach_vm_region(task,
                        &address,
                        &size,
                        VM_REGION_TOP_INFO,
                        reinterpret_cast<vm_region_info_t>(&info),
                        &info_count,
                        &object_name);
    if (kr == KERN_INVALID_ADDRESS) {
      // We're at the end of the address space.
      break;
    } else if (kr != KERN_SUCCESS) {
      LOG(ERROR) << "Calling mach_vm_region failed with error: "
                 << mach_error_string(kr);
      return false;
    }

    if (IsAddressInSharedRegion(address, cpu_type) &&
        info.share_mode != SM_PRIVATE)
      continue;

    if (info.share_mode == SM_COW && info.ref_count == 1)
      info.share_mode = SM_PRIVATE;
    switch (info.share_mode) {
      case SM_PRIVATE:
        private_pages_count += info.private_pages_resident;
        private_pages_count += info.shared_pages_resident;
        break;
      case SM_COW:
        private_pages_count += info.private_pages_resident;
        // Fall through to count the shared pages.
      case SM_SHARED:
        if (seen_objects.count(info.obj_id) == 0) {
          // Only count the first reference to this region.
          seen_objects.insert(info.obj_id);
          shared_pages_count += info.shared_pages_resident;
        }
        break;
      default:
        break;
    }
  }
  vm_size_t page_size;
  kr = host_page_size(task, &page_size);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size, error: "
               << mach_error_string(kr);
    return false;
  }

  if (private_bytes)
    *private_bytes = private_pages_count * page_size;
  if (shared_bytes)
    *shared_bytes = shared_pages_count * page_size;

  return true;
}
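
// Illustrative usage (not from the original file). Either out-parameter may
// be NULL if the caller only wants one of the two numbers:
//
//   size_t private_bytes = 0;
//   size_t shared_bytes = 0;
//   if (metrics->GetMemoryBytes(&private_bytes, &shared_bytes)) {
//     // Both values are resident sizes in bytes.
//   }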
void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
}
bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
  size_t priv = GetWorkingSetSize();
  if (!priv)
    return false;
  ws_usage->priv = priv / 1024;
  ws_usage->shareable = 0;
  ws_usage->shared = 0;
  return true;
}
#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
  (r)->tv_sec = (a)->seconds;             \
  (r)->tv_usec = (a)->microseconds;       \
} while (0)
double ProcessMetrics::GetCPUUsage() {
  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL)
    return 0;

  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
  // in libtop.c), but this is more concise and gives the same results:
  task_thread_times_info thread_info_data;
  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
  kern_return_t kr = task_info(task,
                               TASK_THREAD_TIMES_INFO,
                               reinterpret_cast<task_info_t>(&thread_info_data),
                               &thread_info_count);
  if (kr != KERN_SUCCESS) {
    // Most likely cause: |task| is a zombie.
    return 0;
  }

  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(task, &task_info_data))
    return 0;
  // Set total_time.
  // Thread info contains live time...
  struct timeval user_timeval, system_timeval, task_timeval;
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &system_timeval, &task_timeval);

  // ... task info contains terminated time.
  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &task_timeval, &task_timeval);
  timeradd(&system_timeval, &task_timeval, &task_timeval);
  struct timeval now;
  int retval = gettimeofday(&now, NULL);
  if (retval)
    return 0;

  int64 time = TimeValToMicroseconds(now);
  int64 task_time = TimeValToMicroseconds(task_timeval);

  if ((last_system_time_ == 0) || (last_time_ == 0)) {
    // First call, just set the last values.
    last_system_time_ = task_time;
    last_time_ = time;
    return 0;
  }

  int64 system_time_delta = task_time - last_system_time_;
  int64 time_delta = time - last_time_;
  DCHECK_NE(0U, time_delta);
  if (time_delta == 0)
    return 0;

  // |system_time_delta| is microseconds of CPU time; |time_delta| is
  // microseconds of wall-clock time. Their ratio, times 100, is the
  // percentage of one CPU consumed since the last call.
  double cpu = (system_time_delta * 100.0) / time_delta;

  last_system_time_ = task_time;
  last_time_ = time;

  return cpu;
}
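
// Illustrative usage (not from the original file). CPU usage is measured as
// a delta between successive calls, so the first call only establishes a
// baseline and returns 0:
//
//   metrics->GetCPUUsage();               // Establishes the baseline.
//   // ... let the process run for a while ...
//   double cpu = metrics->GetCPUUsage();  // % of one CPU since the 1st call.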
mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
  mach_port_t task = MACH_PORT_NULL;
  if (port_provider_)
    task = port_provider_->TaskForPid(process_);
  if (task == MACH_PORT_NULL && process_ == getpid())
    task = mach_task_self();
  return task;
}
// ------------------------------------------------------------------------

// Memory committed by the system, in KB, approximated by the active page
// count.
size_t GetSystemCommitCharge() {
  host_name_port_t host = mach_host_self();
  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
  vm_statistics_data_t data;
  kern_return_t kr = host_statistics(host, HOST_VM_INFO,
                                     reinterpret_cast<host_info_t>(&data),
                                     &count);
  if (kr != KERN_SUCCESS) {
    LOG(WARNING) << "Failed to fetch host statistics.";
    return 0;
  }

  vm_size_t page_size;
  kr = host_page_size(host, &page_size);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size.";
    return 0;
  }

  return (data.active_count * page_size) / 1024;
}
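
// Illustrative usage (not from the original file):
//
//   size_t commit_kb = GetSystemCommitCharge();  // KB of active memory.
//   VLOG(1) << "Commit charge: " << commit_kb / 1024 << " MB";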
// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  LOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}
typedef void (*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is
// declared as __private_extern__ and cannot be dlsym()ed. Instead, use
// nlist() to find it.
malloc_error_break_t LookUpMallocErrorBreak() {
#if ARCH_CPU_32_BITS
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));
  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }
  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
#endif  // ARCH_CPU_32_BITS

  return NULL;
}
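
// A worked example of the offset arithmetic in LookUpMallocErrorBreak(),
// with made-up numbers: if &malloc is 0x92001000 at runtime and nlist()
// reports image offsets of 0x1000 for _malloc and 0x2500 for
// _malloc_error_break, the image's load slide is 0x92001000 - 0x1000 =
// 0x92000000, so malloc_error_break lives at 0x92000000 + 0x2500 =
// 0x92002500.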
void CrMallocErrorBreak() {
  g_original_malloc_error_break();
  LOG(ERROR) <<
      "Terminating process due to a potential for future heap corruption";
  base::debug::BreakDebugger();
}
void EnableTerminationOnHeapCorruption() {
  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    LOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);
  if (err != err_none)
    LOG(WARNING) << "Could not override malloc_error_break; error = " << err;
}
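
// Illustrative usage (not from the original file): install the hook once,
// early in process startup, before the heap sees significant use:
//
//   int main(int argc, char* argv[]) {
//     base::EnableTerminationOnHeapCorruption();
//     // ...
//   }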
// ------------------------------------------------------------------------

bool g_oom_killer_enabled;
// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;
void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}
void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}
// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}
// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanLion();
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSLeopard() || base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion()) {
    ChromeCFAllocatorLion* our_allocator =
        const_cast<ChromeCFAllocatorLion*>(
            reinterpret_cast<const ChromeCFAllocatorLion*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}
void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}
// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
{
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}
malloc_zone_t* GetPurgeableZone() {
  // malloc_default_purgeable_zone only exists on >= 10.6. Use dlsym to grab
  // it at runtime because it may not be present in the SDK used for
  // compilation.
  typedef malloc_zone_t* (*malloc_default_purgeable_zone_t)(void);
  malloc_default_purgeable_zone_t malloc_purgeable_zone =
      reinterpret_cast<malloc_default_purgeable_zone_t>(
          dlsym(RTLD_DEFAULT, "malloc_default_purgeable_zone"));
  if (malloc_purgeable_zone)
    return malloc_purgeable_zone();
  return NULL;
}
void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;
  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";
  // See http://trac.webkit.org/changeset/53362/trunk/Tools/DumpRenderTree/mac
  bool zone_allocators_protected = base::mac::IsOSLionOrLater();

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(GetPurgeableZone());

  vm_address_t page_start_default = 0;
  vm_address_t page_start_purgeable = 0;
  vm_size_t len_default = 0;
  vm_size_t len_purgeable = 0;
  if (zone_allocators_protected) {
    page_start_default = reinterpret_cast<vm_address_t>(default_zone) &
        static_cast<vm_size_t>(~(getpagesize() - 1));
    len_default = reinterpret_cast<vm_address_t>(default_zone) -
        page_start_default + sizeof(ChromeMallocZone);
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ | PROT_WRITE);

    if (purgeable_zone) {
      page_start_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) &
          static_cast<vm_size_t>(~(getpagesize() - 1));
      len_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) -
          page_start_purgeable + sizeof(ChromeMallocZone);
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ | PROT_WRITE);
    }
  }

  // Default zone
  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }
  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }
  if (zone_allocators_protected) {
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ);
    if (purgeable_zone) {
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ);
    }
  }
  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.
  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";
  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
        "CFAllocator will not result in termination. http://crbug.com/45650");
  }
  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
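
// Illustrative usage (not from the original file): like the heap-corruption
// hook, this is installed once at startup; afterwards, a failed allocation
// through any of the hooked paths crashes immediately instead of returning
// NULL:
//
//   base::EnableTerminationOnOutOfMemory();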
ProcessId GetParentProcessId(ProcessHandle process) {
  struct kinfo_proc info;
  size_t length = sizeof(struct kinfo_proc);
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
  if (sysctl(mib, 4, &info, &length, NULL, 0) < 0) {
    PLOG(ERROR) << "sysctl";
    return -1;
  }
  if (length == 0)
    return -1;
  return info.kp_eproc.e_ppid;
}
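
// Illustrative usage (not from the original file):
//
//   base::ProcessId parent_pid =
//       base::GetParentProcessId(base::GetCurrentProcId());
//   if (parent_pid == -1) {
//     // Lookup failed (e.g. the process is gone).
//   }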