// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process_util.h"

#import <Cocoa/Cocoa.h>
#include <crt_externs.h>
#include <dlfcn.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <mach/task.h>
#include <mach-o/nlist.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>
#include <signal.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <algorithm>
#include <new>
#include <string>

#include "base/debug/debugger.h"
#include "base/eintr_wrapper.h"
#include "base/file_util.h"
#include "base/hash_tables.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/string_util.h"
#include "base/sys_info.h"
#include "base/threading/thread_local.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"
#include "third_party/mach_override/mach_override.h"

namespace base {

void RestoreDefaultExceptionHandler() {
  // This function is tailored to remove the Breakpad exception handler.
  // exception_mask matches s_exception_mask in
  // breakpad/src/client/mac/handler/exception_handler.cc
  const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
                                          EXC_MASK_BAD_INSTRUCTION |
                                          EXC_MASK_ARITHMETIC |
                                          EXC_MASK_BREAKPOINT;

  // Setting the exception port to MACH_PORT_NULL may not be the entirely
  // kosher way to restore the default exception handler, but in practice it
  // results in the exception port being set to Apple Crash Reporter, the
  // desired behavior.
  task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}

ProcessIterator::ProcessIterator(const ProcessFilter* filter)
    : index_of_kinfo_proc_(0),
      filter_(filter) {
  // Get a snapshot of all of my processes (yes, as we loop it can go stale,
  // but trying to find where we were in a constantly changing list is
  // basically impossible).

  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };

  // Since more processes could start between when we get the size and when
  // we get the list, we do a loop to keep trying until we get it.
  bool done = false;
  int try_num = 1;
  const int max_tries = 10;

  do {
    // Get the size of the buffer.
    size_t len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
      DLOG(ERROR) << "failed to get the size needed for the process list";
      kinfo_procs_.resize(0);
      done = true;
    } else {
      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
      // Leave some spare room for process table growth (more could show up
      // between when we check and now).
      num_of_kinfo_proc += 16;
      kinfo_procs_.resize(num_of_kinfo_proc);
      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
      // Load the list of processes.
      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
        // If we get a mem error, it just means we need a bigger buffer, so
        // loop around again. Anything else is a real error, so give up.
        if (errno != ENOMEM) {
          DLOG(ERROR) << "failed to get the process list";
          kinfo_procs_.resize(0);
          done = true;
        }
      } else {
        // Got the list, just make sure we're sized exactly right.
        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
        kinfo_procs_.resize(num_of_kinfo_proc);
        done = true;
      }
    }
  } while (!done && (try_num++ < max_tries));

  if (!done) {
    DLOG(ERROR) << "failed to collect the process list in a few tries";
    kinfo_procs_.resize(0);
  }
}

ProcessIterator::~ProcessIterator() {
}

bool ProcessIterator::CheckForNextProcess() {
  std::string data;
  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];

    // Skip processes just awaiting collection.
    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
      continue;

    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };

    // Find out what size buffer we need.
    size_t data_len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to figure out the buffer size for a commandline";
      continue;
    }

    data.resize(data_len);
    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to fetch a commandline";
      continue;
    }

    // |data| contains all the command line parameters of the process,
    // separated by blocks of one or more null characters. We tokenize |data|
    // into a vector of strings using '\0' as a delimiter and populate
    // |entry_.cmd_line_args_|.
    std::string delimiters;
    delimiters.push_back('\0');
    Tokenize(data, delimiters, &entry_.cmd_line_args_);

    // |data| starts with the full executable path followed by a null
    // character. We search for the first instance of '\0' and extract
    // everything before it to populate |entry_.exe_file_|.
    size_t exec_name_end = data.find('\0');
    if (exec_name_end == std::string::npos) {
      DLOG(ERROR) << "command line data didn't match expected format";
      continue;
    }

    entry_.pid_ = kinfo.kp_proc.p_pid;
    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
    entry_.gid_ = kinfo.kp_eproc.e_pgid;
    size_t last_slash = data.rfind('/', exec_name_end);
    if (last_slash == std::string::npos)
      entry_.exe_file_.assign(data, 0, exec_name_end);
    else
      entry_.exe_file_.assign(data, last_slash + 1,
                              exec_name_end - last_slash - 1);
    // Start with the next entry next time through.
    ++index_of_kinfo_proc_;

    return true;
  }
  return false;
}

bool NamedProcessIterator::IncludeEntry() {
  return (executable_name_ == entry().exe_file() &&
          ProcessIterator::IncludeEntry());
}

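// A sketch of how these iterators are typically driven from calling code
// (|executable_name| is a placeholder for whatever FilePath::StringType the
// caller wants to match):
//
//   NamedProcessIterator iter(executable_name, NULL);
//   while (const ProcessEntry* entry = iter.NextProcessEntry()) {
//     // Use entry->pid(), entry->exe_file(), entry->cmd_line_args(), ...
//   }
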
// ------------------------------------------------------------------------
// NOTE: about ProcessMetrics
//
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these (and spinning
// up ps to fetch each set of stats seems too dangerous to put in a base API
// for anyone to call). Child processes IPC their port, so return something if
// it is available, otherwise return 0.
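//
// A sketch of in-process use (assuming only the current process's stats are
// needed, so a NULL port provider is enough for TaskForPid() to fall back to
// mach_task_self()):
//
//   scoped_ptr<ProcessMetrics> metrics(ProcessMetrics::CreateProcessMetrics(
//       GetCurrentProcessHandle(), NULL));
//   size_t resident_bytes = metrics->GetWorkingSetSize();
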
ProcessMetrics::ProcessMetrics(ProcessHandle process,
                               ProcessMetrics::PortProvider* port_provider)
    : process_(process),
      last_time_(0),
      last_system_time_(0),
      port_provider_(port_provider) {
  processor_count_ = SysInfo::NumberOfProcessors();
}

// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
    ProcessHandle process,
    ProcessMetrics::PortProvider* port_provider) {
  return new ProcessMetrics(process, port_provider);
}

bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
  return false;
}

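// Fills |task_info_data| with TASK_BASIC_INFO_64 counters for |task|. Returns
// false for a NULL task port or if the task_info() call fails (most commonly
// because the task is a zombie).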
static bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
  if (task == MACH_PORT_NULL)
    return false;
  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
  kern_return_t kr = task_info(task,
                               TASK_BASIC_INFO_64,
                               reinterpret_cast<task_info_t>(task_info_data),
                               &count);
  // Most likely cause for failure: |task| is a zombie.
  return kr == KERN_SUCCESS;
}

size_t ProcessMetrics::GetPagefileUsage() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.virtual_size;
}

size_t ProcessMetrics::GetPeakPagefileUsage() const {
  return 0;
}

size_t ProcessMetrics::GetWorkingSetSize() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.resident_size;
}

size_t ProcessMetrics::GetPeakWorkingSetSize() const {
  return 0;
}

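// Determines the CPU type that the target process is running as, so that
// IsAddressInSharedRegion() below can test addresses against the correct
// shared-region range.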
static bool GetCPUTypeForProcess(pid_t pid, cpu_type_t* cpu_type) {
  size_t len = sizeof(*cpu_type);
  int result = sysctlbyname("sysctl.proc_cputype",
                            cpu_type, &len, NULL, 0);
  if (result != 0) {
    DPLOG(ERROR) << "sysctlbyname(""sysctl.proc_cputype"")";
    return false;
  }
  return true;
}

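// Returns true if |addr| lies within the dyld shared region for the given CPU
// |type|. Mappings there are shared with every other process, so
// GetMemoryBytes() below skips them unless they are mapped SM_PRIVATE.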
static bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
  if (type == CPU_TYPE_I386)
    return addr >= SHARED_REGION_BASE_I386 &&
           addr < (SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386);
  else if (type == CPU_TYPE_X86_64)
    return addr >= SHARED_REGION_BASE_X86_64 &&
           addr < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
  else
    return false;
}

// This is a rough approximation of the algorithm that libtop uses.
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
                                    size_t* shared_bytes) {
  kern_return_t kr;
  size_t private_pages_count = 0;
  size_t shared_pages_count = 0;

  if (!private_bytes && !shared_bytes)
    return true;

  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL) {
    DLOG(ERROR) << "Invalid process";
    return false;
  }

  cpu_type_t cpu_type;
  if (!GetCPUTypeForProcess(process_, &cpu_type))
    return false;

  // The same region can be referenced multiple times. To avoid double
  // counting we need to keep track of which regions we've already counted.
  base::hash_set<int> seen_objects;

  // We iterate through each VM region in the task's address map. For shared
  // memory we add up all the pages that are marked as shared. Like libtop we
  // try to avoid counting pages that are also referenced by other tasks.
  // Since we don't have access to the VM regions of other tasks, the only
  // hint we have is whether the address is in the shared region area.
  //
  // Private memory is much simpler. We simply count the pages that are marked
  // as private or copy on write (COW).
  //
  // See libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-67/libtop.c
  mach_vm_size_t size = 0;
  for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
    mach_port_t object_name;
    kr = mach_vm_region(task,
                        &address,
                        &size,
                        VM_REGION_TOP_INFO,
                        (vm_region_info_t)&info,
                        &info_count,
                        &object_name);
    if (kr == KERN_INVALID_ADDRESS) {
      // We're at the end of the address space.
      break;
    } else if (kr != KERN_SUCCESS) {
      DLOG(ERROR) << "Calling mach_vm_region failed with error: "
                  << mach_error_string(kr);
      return false;
    }

    if (IsAddressInSharedRegion(address, cpu_type) &&
        info.share_mode != SM_PRIVATE)
      continue;

    if (info.share_mode == SM_COW && info.ref_count == 1)
      info.share_mode = SM_PRIVATE;

    switch (info.share_mode) {
      case SM_PRIVATE:
        private_pages_count += info.private_pages_resident;
        private_pages_count += info.shared_pages_resident;
        break;
      case SM_COW:
        private_pages_count += info.private_pages_resident;
        // Fall through to count the shared pages once.
      case SM_SHARED:
        if (seen_objects.count(info.obj_id) == 0) {
          // Only count the first reference to this region.
          seen_objects.insert(info.obj_id);
          shared_pages_count += info.shared_pages_resident;
        }
        break;
      default:
        break;
    }
  }

  vm_size_t page_size;
  kr = host_page_size(task, &page_size);
  if (kr != KERN_SUCCESS) {
    DLOG(ERROR) << "Failed to fetch host page size, error: "
                << mach_error_string(kr);
    return false;
  }

  if (private_bytes)
    *private_bytes = private_pages_count * page_size;
  if (shared_bytes)
    *shared_bytes = shared_pages_count * page_size;

  return true;
}

void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
}

bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
  size_t priv = GetWorkingSetSize();
  if (!priv)
    return false;
  ws_usage->priv = priv / 1024;
  ws_usage->shareable = 0;
  ws_usage->shared = 0;
  return true;
}

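// Converts a Mach time_value_t (integer seconds plus microseconds) into a BSD
// struct timeval so that timeradd() can be used on it.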
#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
  (r)->tv_sec = (a)->seconds;             \
  (r)->tv_usec = (a)->microseconds;       \
} while (0)

double ProcessMetrics::GetCPUUsage() {
  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL)
    return 0;

  kern_return_t kr;

  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
  // in libtop.c), but this is more concise and gives the same results:
  task_thread_times_info thread_info_data;
  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
  kr = task_info(task,
                 TASK_THREAD_TIMES_INFO,
                 reinterpret_cast<task_info_t>(&thread_info_data),
                 &thread_info_count);
  if (kr != KERN_SUCCESS) {
    // Most likely cause: |task| is a zombie.
    return 0;
  }

  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(task, &task_info_data))
    return 0;

  // Set total_time: thread info contains live time...
  struct timeval user_timeval, system_timeval, task_timeval;
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &system_timeval, &task_timeval);

  // ... task info contains terminated time.
  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &task_timeval, &task_timeval);
  timeradd(&system_timeval, &task_timeval, &task_timeval);

  struct timeval now;
  int retval = gettimeofday(&now, NULL);
  if (retval)
    return 0;

  int64 time = TimeValToMicroseconds(now);
  int64 task_time = TimeValToMicroseconds(task_timeval);

  if ((last_system_time_ == 0) || (last_time_ == 0)) {
    // First call, just set the last values.
    last_system_time_ = task_time;
    last_time_ = time;
    return 0;
  }

  int64 system_time_delta = task_time - last_system_time_;
  int64 time_delta = time - last_time_;
  DCHECK_NE(0U, time_delta);
  if (time_delta == 0)
    return 0;

  // Express the task's CPU time over the interval as a percentage of elapsed
  // wall-clock time.
  double cpu = static_cast<double>((system_time_delta * 100.0) / time_delta);

  last_system_time_ = task_time;
  last_time_ = time;

  return cpu;
}

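// Returns the mach task port for |process|: the port supplied by the
// embedder's PortProvider when one was registered, or this process's own task
// port when asking about ourselves. Anything else yields MACH_PORT_NULL.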
mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
  mach_port_t task = MACH_PORT_NULL;
  if (port_provider_)
    task = port_provider_->TaskForPid(process_);
  if (task == MACH_PORT_NULL && process_ == getpid())
    task = mach_task_self();
  return task;
}

// ------------------------------------------------------------------------

// Bytes committed by the system.
size_t GetSystemCommitCharge() {
  host_name_port_t host = mach_host_self();
  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
  vm_statistics_data_t data;
  kern_return_t kr = host_statistics(host, HOST_VM_INFO,
                                     reinterpret_cast<host_info_t>(&data),
                                     &count);
  if (kr != KERN_SUCCESS) {
    DLOG(WARNING) << "Failed to fetch host statistics.";
    return 0;
  }

  vm_size_t page_size;
  kr = host_page_size(host, &page_size);
  if (kr != KERN_SUCCESS) {
    DLOG(ERROR) << "Failed to fetch host page size.";
    return 0;
  }

  return (data.active_count * page_size) / 1024;
}

// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  DLOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}

typedef void(*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is
// declared as __private_extern__ and cannot be dlsym()ed. Instead, use
// nlist() to find it.
malloc_error_break_t LookUpMallocErrorBreak() {
#if ARCH_CPU_32_BITS
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
#endif  // ARCH_CPU_32_BITS

  return NULL;
}

// Simple scoper that saves the current value of errno, resets it to 0, and on
// destruction puts the old value back. This is so that CrMallocErrorBreak can
// safely test errno free from the effects of other routines.
class ScopedClearErrno {
 public:
  ScopedClearErrno() : old_errno_(errno) {
    errno = 0;
  }
  ~ScopedClearErrno() {
    if (errno == 0)
      errno = old_errno_;
  }

 private:
  const int old_errno_;

  DISALLOW_COPY_AND_ASSIGN(ScopedClearErrno);
};

// Combines ThreadLocalBoolean with AutoReset. It would be convenient
// to compose ThreadLocalPointer<bool> with AutoReset<bool>, but that
// would require allocating some storage for the bool.
class ThreadLocalBooleanAutoReset {
 public:
  ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
      : scoped_tlb_(tlb),
        original_value_(tlb->Get()) {
    scoped_tlb_->Set(new_value);
  }
  ~ThreadLocalBooleanAutoReset() {
    scoped_tlb_->Set(original_value_);
  }

 private:
  ThreadLocalBoolean* scoped_tlb_;
  bool original_value_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
};

base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_malloc = LAZY_INSTANCE_INITIALIZER;

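// Replacement for the malloc library's malloc_error_break(). It is spliced in
// via mach_override in EnableTerminationOnHeapCorruption() below, with the
// original implementation saved in g_original_malloc_error_break.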
void CrMallocErrorBreak() {
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer. The EBADF case comes up because the malloc library
  // attempts to log to ASL (syslog) before calling this code, which fails
  // accessing a Unix-domain socket because of sandboxing.
  if (errno == ENOMEM || (errno == EBADF && g_unchecked_malloc.Get().Get()))
    return;

  // A unit test checks this error message, so it needs to be in release
  // builds.
  LOG(ERROR) <<
      "Terminating process due to a potential for future heap corruption";

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library). Max documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}

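// With the write above, the faulting address in a crash report encodes errno:
// for example, a fault at 0x16 means errno was 22 (EINVAL on Darwin) when
// malloc_error_break fired.
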
void EnableTerminationOnHeapCorruption() {
#ifdef ADDRESS_SANITIZER
  // Don't do anything special on heap corruption, because it should be
  // handled by AddressSanitizer.
  return;
#endif
  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
}

// ------------------------------------------------------------------------

bool g_oom_killer_enabled;

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone, size_t num_items,
                        size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone, void* ptr) {
  ScopedClearErrno clear_errno;
  g_old_free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone, void* ptr, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone, size_t alignment,
                          size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone, void* ptr) {
  ScopedClearErrno clear_errno;
  g_old_free_purgeable(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone, void* ptr,
                                   size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment, size_t size) {
  ScopedClearErrno clear_errno;
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::
      IsOSDangerouslyLaterThanMountainLionForUseByCFAllocatorReplacement();
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion() || base::mac::IsOSMountainLion()) {
    ChromeCFAllocatorLions* our_allocator =
        const_cast<ChromeCFAllocatorLions*>(
            reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint, void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size, CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint, void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
{
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

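// Allocation that may legitimately fail: calls the saved (pre-OOM-killer)
// malloc so a NULL result is returned to the caller instead of crashing, and
// sets the thread-local g_unchecked_malloc flag so CrMallocErrorBreak ignores
// the EBADF errno left behind by the malloc library's sandboxed ASL logging.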
void* UncheckedMalloc(size_t size) {
  if (g_old_malloc) {
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_malloc.Pointer(), true);
    return g_old_malloc(malloc_default_zone(), size);
  }
  return malloc(size);
}

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  // See http://trac.webkit.org/changeset/53362/trunk/Tools/DumpRenderTree/mac
  bool zone_allocators_protected = base::mac::IsOSLionOrLater();

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  vm_address_t page_start_default = 0;
  vm_address_t page_start_purgeable = 0;
  vm_size_t len_default = 0;
  vm_size_t len_purgeable = 0;
  if (zone_allocators_protected) {
    page_start_default = reinterpret_cast<vm_address_t>(default_zone) &
        static_cast<vm_size_t>(~(getpagesize() - 1));
    len_default = reinterpret_cast<vm_address_t>(default_zone) -
        page_start_default + sizeof(ChromeMallocZone);
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ | PROT_WRITE);

    if (purgeable_zone) {
      page_start_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) &
          static_cast<vm_size_t>(~(getpagesize() - 1));
      len_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) -
          page_start_purgeable + sizeof(ChromeMallocZone);
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ | PROT_WRITE);
    }
  }

  // Default zone.

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists).

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  if (zone_allocators_protected) {
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ);
    if (purgeable_zone) {
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ);
    }
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
          "CFAllocator will not result in termination. http://crbug.com/45650");
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

ProcessId GetParentProcessId(ProcessHandle process) {
  struct kinfo_proc info;
  size_t length = sizeof(struct kinfo_proc);
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
  if (sysctl(mib, 4, &info, &length, NULL, 0) < 0) {
    DPLOG(ERROR) << "sysctl";
    return -1;
  }
  if (length == 0)
    return -1;
  return info.kp_eproc.e_ppid;
}

const int kWaitBeforeKillSeconds = 2;

// Reap |child| process. This call blocks until completion.
void BlockingReap(pid_t child) {
  const pid_t result = HANDLE_EINTR(waitpid(child, NULL, 0));
  if (result == -1) {
    DPLOG(ERROR) << "waitpid(" << child << ", NULL, 0)";
  }
}

// Waits for |timeout| seconds for the given |child| to exit and reap it. If
// the child doesn't exit within the time specified, kills it.
//
// This function takes two approaches: first, it tries to use kqueue to
// observe when the process exits. kevent can monitor a kqueue with a
// timeout, so this method is preferred for waiting a specified period of
// time. Once the kqueue indicates the process has exited, waitpid will reap
// the exited child. If the kqueue doesn't provide an exit event notification
// before the timeout expires, or if the kqueue fails or misbehaves, the
// process will be mercilessly killed and reaped.
//
// A child process passed to this function may be in one of several states:
// running, terminated and not yet reaped, and (apparently, and unfortunately)
// terminated and already reaped. Normally, a process will at least have been
// asked to exit before this function is called, but this is not required.
// If a process is terminating and unreaped, there may be a window between the
// time that kqueue will no longer recognize it and when it becomes an actual
// zombie that a non-blocking (WNOHANG) waitpid can reap. This condition is
// detected when kqueue indicates that the process is not running and a
// non-blocking waitpid fails to reap the process but indicates that it is
// still running. In this event, a blocking attempt to reap the process
// collects the known-dying child, preventing zombies from congregating.
//
// In the event that the kqueue misbehaves entirely, as it might under an
// EMFILE condition ("too many open files", or out of file descriptors), this
// function will forcibly kill and reap the child without delay. This
// eliminates another potential zombie vector. (If you're out of file
// descriptors, you're probably deep into something else, but that doesn't
// mean that zombies should be allowed to kick you while you're down.)
//
// The fact that this function seemingly can be called to wait on a child
// that's not only already terminated but already reaped is a bit of a
// problem: a reaped child's pid can be reclaimed and may refer to a distinct
// process in that case. The fact that this function can seemingly be called
// to wait on a process that's not even a child is also a problem: kqueue will
// work in that case, but waitpid won't, and killing a non-child might not be
// the best approach.
void WaitForChildToDie(pid_t child, int timeout) {
  DCHECK(child > 0);
  DCHECK(timeout > 0);

  // DON'T ADD ANY EARLY RETURNS TO THIS FUNCTION without ensuring that
  // |child| has been reaped. Specifically, even if a kqueue, kevent, or other
  // call fails, this function should fall back to the last resort of trying
  // to kill and reap the process. Not observing this rule will resurrect
  // zombies.

  int result;

  int kq = HANDLE_EINTR(kqueue());
  if (kq == -1) {
    DPLOG(ERROR) << "kqueue()";
  } else {
    file_util::ScopedFD auto_close_kq(&kq);

    struct kevent change = {0};
    EV_SET(&change, child, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
    result = HANDLE_EINTR(kevent(kq, &change, 1, NULL, 0, NULL));

    if (result == -1) {
      if (errno != ESRCH) {
        DPLOG(ERROR) << "kevent (setup " << child << ")";
      } else {
        // At this point, one of the following has occurred:
        // 1. The process has died but has not yet been reaped.
        // 2. The process has died and has already been reaped.
        // 3. The process is in the process of dying. It's no longer
        //    kqueueable, but it may not be waitable yet either. Mark calls
        //    this case the "zombie death race".

        result = HANDLE_EINTR(waitpid(child, NULL, WNOHANG));

        if (result != 0) {
          // A positive result indicates case 1: waitpid succeeded and reaped
          // the child. A result of -1 indicates case 2: the child has already
          // been reaped. In both of these cases, no further action is
          // necessary.
          return;
        }

        // |result| is 0, indicating case 3. The process will be waitable in
        // short order. Fall back out of the kqueue code to kill it (for good
        // measure) and reap it.
      }
    } else {
      // Keep track of the elapsed time to be able to restart kevent if it's
      // interrupted.
      TimeDelta remaining_delta = TimeDelta::FromSeconds(timeout);
      Time deadline = Time::Now() + remaining_delta;

      struct kevent event = {0};
      while (remaining_delta.InMilliseconds() > 0) {
        const struct timespec remaining_timespec = remaining_delta.ToTimeSpec();
        result = kevent(kq, NULL, 0, &event, 1, &remaining_timespec);
        if (result == -1 && errno == EINTR) {
          remaining_delta = deadline - Time::Now();
          result = 0;
        } else {
          break;
        }
      }

      if (result == -1) {
        DPLOG(ERROR) << "kevent (wait " << child << ")";
      } else if (result > 1) {
        DLOG(ERROR) << "kevent (wait " << child << "): unexpected result "
                    << result;
      } else if (result == 1) {
        if ((event.fflags & NOTE_EXIT) &&
            (event.ident == static_cast<uintptr_t>(child))) {
          // The process is dead or dying. This won't block for long, if at
          // all.
          BlockingReap(child);
          return;
        } else {
          DLOG(ERROR) << "kevent (wait " << child
                      << "): unexpected event: fflags=" << event.fflags
                      << ", ident=" << event.ident;
        }
      }
    }
  }

  // The child is still alive, or is very freshly dead. Be sure by sending it
  // a signal. This is safe even if it's freshly dead, because it will be a
  // zombie (or on the way to zombiedom) and kill will return 0 even if the
  // signal is not delivered to a live process.
  result = kill(child, SIGKILL);
  if (result == -1) {
    DPLOG(ERROR) << "kill(" << child << ", SIGKILL)";
  } else {
    // The child is definitely on the way out now. BlockingReap won't need to
    // wait for long, if at all.
    BlockingReap(child);
  }
}

void EnsureProcessTerminated(ProcessHandle process) {
  WaitForChildToDie(process, kWaitBeforeKillSeconds);
}

}  // namespace base