/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsMemoryReporterManager.h"

#include "nsAtomTable.h"
#include "nsCOMPtr.h"
#include "nsCOMArray.h"
#include "nsPrintfCString.h"
#include "nsProxyRelease.h"
#include "nsServiceManagerUtils.h"
#include "nsITimer.h"
#include "nsThreadManager.h"
#include "nsThreadUtils.h"
#include "nsPIDOMWindow.h"
#include "nsIObserverService.h"
#include "nsIOService.h"
#include "nsIGlobalObject.h"
#include "nsIXPConnect.h"
#ifdef MOZ_GECKO_PROFILER
#  include "GeckoProfilerReporter.h"
#endif
#if defined(XP_UNIX) || defined(MOZ_DMD)
#  include "nsMemoryInfoDumper.h"
#endif
#include "nsNetCID.h"
#include "nsThread.h"
#include "VRProcessManager.h"
#include "mozilla/Attributes.h"
#include "mozilla/MemoryReportingProcess.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Preferences.h"
#include "mozilla/RDDProcessManager.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Services.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/dom/MemoryReportTypes.h"
#include "mozilla/dom/ContentParent.h"
#include "mozilla/gfx/GPUProcessManager.h"
#include "mozilla/ipc/UtilityProcessManager.h"
#include "mozilla/ipc/FileDescriptorUtils.h"
#ifdef MOZ_PHC
#  include "PHC.h"
#endif

#ifdef MOZ_WIDGET_ANDROID
#  include "mozilla/java/GeckoAppShellWrappers.h"
#  include "mozilla/jni/Utils.h"
#endif

#ifdef XP_WIN
#  include "mozilla/MemoryInfo.h"

#  include <process.h>
#  ifndef getpid
#    define getpid _getpid
#  endif
#else
#  include <unistd.h>
#endif

using namespace mozilla;
using namespace mozilla::ipc;
using namespace dom;
#if defined(XP_LINUX)

#  include "mozilla/MemoryMapping.h"

#  include <malloc.h>
#  include <string.h>
#  include <stdlib.h>

[[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
  // There are more than two fields, but we're only interested in the first
  // two.
  static const int MAX_FIELD = 2;
  size_t fields[MAX_FIELD];
  MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
  FILE* f = fopen("/proc/self/statm", "r");
  if (f) {
    int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
    fclose(f);
    if (nread == MAX_FIELD) {
      *aN = fields[aField] * getpagesize();
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
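
// For reference (values illustrative, not from a real run): /proc/self/statm
// is a single line of seven space-separated page counts, e.g.
//   245014 19210 3044 1249 0 186388 0
// where field 0 is the total program size (vsize) and field 1 is the resident
// set size, hence the getpagesize() scaling above.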

[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
  // You might be tempted to calculate USS by subtracting the "shared" value
  // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
  // statm's "shared" value actually counts pages backed by files, which has
  // little to do with whether the pages are actually shared. /proc/self/smaps
  // on the other hand appears to give us the correct information.

  nsTArray<MemoryMapping> mappings(1024);
  MOZ_TRY(GetMemoryMappings(mappings, aPid));

  int64_t amount = 0;
  for (auto& mapping : mappings) {
    amount += mapping.Private_Clean();
    amount += mapping.Private_Dirty();
  }
  *aN = amount;
  return NS_OK;
}
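
// For reference, each /proc/<pid>/smaps entry carries per-mapping counters
// such as (sizes illustrative):
//   Private_Clean:        4 kB
//   Private_Dirty:      128 kB
// MemoryMapping surfaces those fields; summing the two private counters over
// all mappings is what yields the USS above.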

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(0, aN);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return GetProcSelfStatmField(1, aN);
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}
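
// Unlike on Mac (see the jemalloc purge logic in the XP_MACOSX section
// below), no pre-purge step is needed before reading RSS here, so the "fast"
// variant can simply delegate to the regular one.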

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, pid_t aPid = 0) {
  return GetProcSelfSmapsPrivate(aN, aPid);
}

#  ifdef HAVE_MALLINFO
#    define HAVE_SYSTEM_HEAP_REPORTER 1
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}
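
// Note: glibc 2.33 and later also offer mallinfo2(), whose fields are size_t
// and so avoid the overflow caveat above; switching to it where available
// would be a straightforward substitution.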
#  endif

#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__OpenBSD__) || defined(__FreeBSD_kernel__)

#  include <sys/param.h>
#  include <sys/sysctl.h>
#  if defined(__DragonFly__) || defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__)
#    include <sys/user.h>
#  endif

#  include <unistd.h>

#  if defined(__NetBSD__)
#    undef KERN_PROC
#    define KERN_PROC KERN_PROC2
#    define KINFO_PROC struct kinfo_proc2
#  else
#    define KINFO_PROC struct kinfo_proc
#  endif

#  if defined(__DragonFly__)
#    define KP_SIZE(kp) (kp.kp_vm_map_size)
#    define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
#  elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#    define KP_SIZE(kp) (kp.ki_size)
#    define KP_RSS(kp) (kp.ki_rssize * getpagesize())
#  elif defined(__NetBSD__)
#    define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  elif defined(__OpenBSD__)
#    define KP_SIZE(kp) \
      ((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
#    define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
#  endif

[[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
#  if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
  static LazyLogModule sPledgeLog("SandboxPledge");
  MOZ_LOG(sPledgeLog, LogLevel::Debug,
          ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
  return NS_ERROR_FAILURE;
#  endif
  int mib[] = {
      // clang-format off
      CTL_KERN,
      KERN_PROC,
      KERN_PROC_PID,
      getpid(),
#  if defined(__NetBSD__) || defined(__OpenBSD__)
      sizeof(KINFO_PROC),
      1,
#  endif
      // clang-format on
  };
  u_int miblen = sizeof(mib) / sizeof(mib[0]);
  size_t size = sizeof(KINFO_PROC);
  if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_SIZE(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_SUCCEEDED(rv)) {
    *aN = KP_RSS(proc);
  }
  return rv;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  ifdef __FreeBSD__
#    include <libutil.h>
#    include <algorithm>

[[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
                                                  uint64_t* aMaxreg) {
  int cnt;
  struct kinfo_vmentry* vmmap;
  struct kinfo_vmentry* kve;
  if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
    return NS_ERROR_FAILURE;
  }
  if (aPrss) {
    *aPrss = 0;
  }
  if (aMaxreg) {
    *aMaxreg = 0;
  }

  for (int i = 0; i < cnt; i++) {
    kve = &vmmap[i];
    if (aPrss) {
      *aPrss += kve->kve_private_resident;
    }
    if (aMaxreg) {
      *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
    }
  }

  free(vmmap);
  return NS_OK;
}

#    define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  int64_t priv;
  nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
  NS_ENSURE_SUCCESS(rv, rv);
  *aN = priv * getpagesize();
  return NS_OK;
}

#    define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  uint64_t biggestRegion;
  nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
  if (NS_SUCCEEDED(rv)) {
    *aN = biggestRegion;
  }
  return NS_OK;
}
#  endif  // FreeBSD

#elif defined(SOLARIS)

#  include <procfs.h>
#  include <fcntl.h>
#  include <unistd.h>

static void XMappingIter(int64_t& aVsize, int64_t& aResident,
                         int64_t& aShared) {
  aVsize = -1;
  aResident = -1;
  aShared = -1;
  int mapfd = open("/proc/self/xmap", O_RDONLY);
  struct stat st;
  prxmap_t* prmapp = nullptr;
  if (mapfd >= 0) {
    if (!fstat(mapfd, &st)) {
      int nmap = st.st_size / sizeof(prxmap_t);
      while (1) {
        // stat(2) on /proc/<pid>/xmap returns an incorrect value,
        // prior to the release of Solaris 11.
        // Here is a workaround for it.
        nmap *= 2;
        prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
        if (!prmapp) {
          // out of memory
          break;
        }
        int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
        if (n < 0) {
          break;
        }
        if (nmap >= n / sizeof(prxmap_t)) {
          aVsize = 0;
          aResident = 0;
          aShared = 0;
          for (int i = 0; i < n / sizeof(prxmap_t); i++) {
            aVsize += prmapp[i].pr_size;
            aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            if (prmapp[i].pr_mflags & MA_SHARED) {
              aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
            }
          }
          break;
        }
        free(prmapp);
      }
      free(prmapp);
    }
    close(mapfd);
  }
}

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (vsize == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = vsize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident - shared;
  return NS_OK;
}

#elif defined(XP_MACOSX)

#  include <mach/mach_init.h>
#  include <mach/mach_vm.h>
#  include <mach/shared_region.h>
#  include <mach/task.h>
#  include <sys/sysctl.h>

[[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
  mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
  kern_return_t kr =
      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
  return kr == KERN_SUCCESS;
}

// The VSIZE figure on Mac includes huge amounts of shared memory and is always
// absurdly high, e.g. 2GB+ even at start-up. But both 'top' and 'ps' report
// it, so we might as well too.
#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.virtual_size;
  return NS_OK;
}

// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
// pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
// an accurate result. The OS will take away MADV_FREE'd pages when there's
// memory pressure, so ideally, they shouldn't count against our RSS.
//
// Purging these pages can take a long time for some users (see bug 789975),
// so we provide the option to get the RSS without purging first.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
                                                                bool aDoPurge) {
#  ifdef HAVE_JEMALLOC_STATS
  if (aDoPurge) {
    Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
    jemalloc_purge_freed_pages();
  }
#  endif

  task_basic_info ti;
  if (!GetTaskBasicInfo(&ti)) {
    return NS_ERROR_FAILURE;
  }
  *aN = ti.resident_size;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
  mach_vm_address_t base;
  mach_vm_address_t size;

  switch (aType) {
    case CPU_TYPE_ARM:
      base = SHARED_REGION_BASE_ARM;
      size = SHARED_REGION_SIZE_ARM;
      break;
    case CPU_TYPE_ARM64:
      base = SHARED_REGION_BASE_ARM64;
      size = SHARED_REGION_SIZE_ARM64;
      break;
    case CPU_TYPE_I386:
      base = SHARED_REGION_BASE_I386;
      size = SHARED_REGION_SIZE_I386;
      break;
    case CPU_TYPE_X86_64:
      base = SHARED_REGION_BASE_X86_64;
      size = SHARED_REGION_SIZE_X86_64;
      break;
    default:
      return false;
  }

  return base <= aAddr && aAddr < (base + size);
}

[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, mach_port_t aPort = 0) {
  if (!aN) {
    return NS_ERROR_FAILURE;
  }

  cpu_type_t cpu_type;
  size_t len = sizeof(cpu_type);
  if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
    return NS_ERROR_FAILURE;
  }

  // Roughly based on libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
  size_t privatePages = 0;
  mach_vm_size_t topSize = 0;
  for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += topSize) {
    vm_region_top_info_data_t topInfo;
    mach_msg_type_number_t topInfoCount = VM_REGION_TOP_INFO_COUNT;
    mach_port_t topObjectName;

    kern_return_t kr = mach_vm_region(
        aPort ? aPort : mach_task_self(), &addr, &topSize, VM_REGION_TOP_INFO,
        reinterpret_cast<vm_region_info_t>(&topInfo), &topInfoCount,
        &topObjectName);
    if (kr == KERN_INVALID_ADDRESS) {
      // Done iterating VM regions.
      break;
    } else if (kr != KERN_SUCCESS) {
      return NS_ERROR_FAILURE;
    }

    if (InSharedRegion(addr, cpu_type) && topInfo.share_mode != SM_PRIVATE) {
      continue;
    }

    switch (topInfo.share_mode) {
      case SM_LARGE_PAGE:
        // NB: Large pages are not shareable and always resident.
      case SM_PRIVATE:
        privatePages += topInfo.private_pages_resident;
        privatePages += topInfo.shared_pages_resident;
        break;
      case SM_COW:
        privatePages += topInfo.private_pages_resident;
        if (topInfo.ref_count == 1) {
          // Treat copy-on-write pages as private if they only have one
          // reference.
          privatePages += topInfo.shared_pages_resident;
        }
        break;
      case SM_SHARED: {
        // Using mprotect() or similar to protect a page in the middle of a
        // mapping can create aliased mappings. They look like shared mappings
        // to the VM_REGION_TOP_INFO interface, so re-check with
        // VM_REGION_EXTENDED_INFO.

        mach_vm_size_t exSize = 0;
        vm_region_extended_info_data_t exInfo;
        mach_msg_type_number_t exInfoCount = VM_REGION_EXTENDED_INFO_COUNT;
        mach_port_t exObjectName;
        kr = mach_vm_region(aPort ? aPort : mach_task_self(), &addr, &exSize,
                            VM_REGION_EXTENDED_INFO,
                            reinterpret_cast<vm_region_info_t>(&exInfo),
                            &exInfoCount, &exObjectName);
        if (kr == KERN_INVALID_ADDRESS) {
          // Done iterating VM regions.
          break;
        } else if (kr != KERN_SUCCESS) {
          return NS_ERROR_FAILURE;
        }

        if (exInfo.share_mode == SM_PRIVATE_ALIASED) {
          privatePages += exInfo.pages_resident;
        }
        break;
      }
      default:
        break;
    }
  }

  vm_size_t pageSize;
  if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
      KERN_SUCCESS) {
    pageSize = PAGE_SIZE;
  }

  *aN = privatePages * pageSize;
  return NS_OK;
}

[[nodiscard]] static nsresult PhysicalFootprintAmount(int64_t* aN,
                                                      mach_port_t aPort = 0) {
  MOZ_ASSERT(aN);

  // The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data
  // matches the value in the 'Memory' column of the Activity Monitor.
  task_vm_info_data_t task_vm_info;
  mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
  kern_return_t kr = task_info(aPort ? aPort : mach_task_self(), TASK_VM_INFO,
                               (task_info_t)&task_vm_info, &count);
  if (kr != KERN_SUCCESS) {
    return NS_ERROR_FAILURE;
  }

  *aN = task_vm_info.phys_footprint;
  return NS_OK;
}

#elif defined(XP_WIN)

#  include <windows.h>
#  include <psapi.h>
#  include <algorithm>

#  define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  MEMORYSTATUSEX s;
  s.dwLength = sizeof(s);

  if (!GlobalMemoryStatusEx(&s)) {
    return NS_ERROR_FAILURE;
  }

  *aN = s.ullTotalVirtual - s.ullAvailVirtual;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS pmc;
  pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);

  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmc.WorkingSetSize;
  return NS_OK;
}

[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
  return ResidentDistinguishedAmount(aN);
}

#  define HAVE_RESIDENT_UNIQUE_REPORTER 1

[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
    int64_t* aN, HANDLE aProcess = nullptr) {
  // Determine how many entries we need.
  PSAPI_WORKING_SET_INFORMATION tmp;
  DWORD tmpSize = sizeof(tmp);
  memset(&tmp, 0, tmpSize);

  HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
  QueryWorkingSet(proc, &tmp, tmpSize);

  // Fudge the size in case new entries are added between calls.
  size_t entries = tmp.NumberOfEntries * 2;

  if (!entries) {
    return NS_ERROR_FAILURE;
  }

  DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
  UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
      static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));

  if (!infoArray) {
    return NS_ERROR_FAILURE;
  }

  if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
    return NS_ERROR_FAILURE;
  }

  entries = static_cast<size_t>(infoArray->NumberOfEntries);
  size_t privatePages = 0;
  for (size_t i = 0; i < entries; i++) {
    // Count shared pages that only one process is using as private.
    if (!infoArray->WorkingSetInfo[i].Shared ||
        infoArray->WorkingSetInfo[i].ShareCount <= 1) {
      privatePages++;
    }
  }

  SYSTEM_INFO si;
  GetSystemInfo(&si);

  *aN = privatePages * si.dwPageSize;
  return NS_OK;
}

#  define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
    int64_t* aN) {
  SIZE_T biggestRegion = 0;
  MEMORY_BASIC_INFORMATION vmemInfo = {0};
  for (size_t currentAddress = 0;;) {
    if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
      // Something went wrong, just return whatever we've got already.
      break;
    }

    if (vmemInfo.State == MEM_FREE) {
      biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
    }

    SIZE_T lastAddress = currentAddress;
    currentAddress += vmemInfo.RegionSize;

    // If we overflow, we've examined all of the address space.
    if (currentAddress < lastAddress) {
      break;
    }
  }

  *aN = biggestRegion;
  return NS_OK;
}

#  define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
  PROCESS_MEMORY_COUNTERS_EX pmcex;
  pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);

  if (!GetProcessMemoryInfo(GetCurrentProcess(),
                            (PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
    return NS_ERROR_FAILURE;
  }

  *aN = pmcex.PrivateUsage;
  return NS_OK;
}

#  define HAVE_SYSTEM_HEAP_REPORTER 1
// Windows can have multiple separate heaps, but we should not touch
// non-default heaps because they may be destroyed at any time while we hold a
// handle. So we count only the default heap.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
  HANDLE heap = GetProcessHeap();

  NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);

  int64_t heapSize = 0;
  PROCESS_HEAP_ENTRY entry;
  entry.lpData = nullptr;
  while (HeapWalk(heap, &entry)) {
    // We don't count entry.cbOverhead, because we just want to measure the
    // space available to the program.
    if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
      heapSize += entry.cbData;
    }
  }

  // Check this result only after unlocking the heap, so that we don't leave
  // the heap locked if there was an error.
  DWORD lastError = GetLastError();

  // I have no idea how things would proceed if unlocking this heap failed...
  NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);

  NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);

  *aSizeOut = heapSize;
  return NS_OK;
}

struct SegmentKind {
  DWORD mState;
  DWORD mType;
  DWORD mProtect;
  int mIsStack;
};

struct SegmentEntry : public PLDHashEntryHdr {
  static PLDHashNumber HashKey(const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
                                kind->mIsStack);
  }

  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<const SegmentEntry*>(aEntry);
    return kind->mState == entry->mKind.mState &&
           kind->mType == entry->mKind.mType &&
           kind->mProtect == entry->mKind.mProtect &&
           kind->mIsStack == entry->mKind.mIsStack;
  }

  static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
    auto kind = static_cast<const SegmentKind*>(aKey);
    auto entry = static_cast<SegmentEntry*>(aEntry);
    entry->mKind = *kind;
    entry->mCount = 0;
    entry->mSize = 0;
  }

  static const PLDHashTableOps Ops;

  SegmentKind mKind;  // The segment kind.
  uint32_t mCount;    // The number of segments of this kind.
  size_t mSize;       // The combined size of segments of this kind.
};

/* static */ const PLDHashTableOps SegmentEntry::Ops = {
    SegmentEntry::HashKey, SegmentEntry::MatchEntry,
    PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
    SegmentEntry::InitEntry};

class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
  ~WindowsAddressSpaceReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    // First iterate over all the segments and record how many of each kind
    // there were and their aggregate sizes. We use a hash table for this
    // because there are a couple of dozen different kinds possible.

    PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
    MEMORY_BASIC_INFORMATION info = {0};
    bool isPrevSegStackGuard = false;
    for (size_t currentAddress = 0;;) {
      if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
        // Something went wrong, just return whatever we've got already.
        break;
      }

      size_t size = info.RegionSize;

      // Note that |type| and |protect| are ignored in some cases.
      DWORD state = info.State;
      DWORD type =
          (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
      DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
      bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
                     type == MEM_PRIVATE && protect == PAGE_READWRITE;

      SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
      auto entry =
          static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
      if (entry) {
        entry->mCount += 1;
        entry->mSize += size;
      }

      isPrevSegStackGuard = info.State == MEM_COMMIT &&
                            info.Type == MEM_PRIVATE &&
                            info.Protect == (PAGE_READWRITE | PAGE_GUARD);

      size_t lastAddress = currentAddress;
      currentAddress += size;

      // If we overflow, we've examined all of the address space.
      if (currentAddress < lastAddress) {
        break;
      }
    }

    // Then iterate over the hash table and report the details for each
    // segment kind.

    for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
      // For each range of pages, we consider one or more of its State, Type
      // and Protect values. These are documented at
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
      // (for State and Type) and
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
      // (for Protect).
      //
      // Not all State values have accompanying Type and Protection values.
      bool doType = false;
      bool doProtect = false;

      auto entry = static_cast<const SegmentEntry*>(iter.Get());

      nsCString path("address-space");

      switch (entry->mKind.mState) {
        case MEM_FREE:
          path.AppendLiteral("/free");
          break;

        case MEM_RESERVE:
          path.AppendLiteral("/reserved");
          doType = true;
          break;

        case MEM_COMMIT:
          path.AppendLiteral("/commit");
          doType = true;
          doProtect = true;
          break;

        default:
          // Should be impossible, but handle it just in case.
          path.AppendLiteral("/???");
          break;
      }

      if (doType) {
        switch (entry->mKind.mType) {
          case MEM_IMAGE:
            path.AppendLiteral("/image");
            break;

          case MEM_MAPPED:
            path.AppendLiteral("/mapped");
            break;

          case MEM_PRIVATE:
            path.AppendLiteral("/private");
            break;

          default:
            // Should be impossible, but handle it just in case.
            path.AppendLiteral("/???");
            break;
        }
      }

      if (doProtect) {
        DWORD protect = entry->mKind.mProtect;
        // Basic attributes. Exactly one of these should be set.
        if (protect & PAGE_EXECUTE) {
          path.AppendLiteral("/execute");
        }
        if (protect & PAGE_EXECUTE_READ) {
          path.AppendLiteral("/execute-read");
        }
        if (protect & PAGE_EXECUTE_READWRITE) {
          path.AppendLiteral("/execute-readwrite");
        }
        if (protect & PAGE_EXECUTE_WRITECOPY) {
          path.AppendLiteral("/execute-writecopy");
        }
        if (protect & PAGE_NOACCESS) {
          path.AppendLiteral("/noaccess");
        }
        if (protect & PAGE_READONLY) {
          path.AppendLiteral("/readonly");
        }
        if (protect & PAGE_READWRITE) {
          path.AppendLiteral("/readwrite");
        }
        if (protect & PAGE_WRITECOPY) {
          path.AppendLiteral("/writecopy");
        }

        // Modifiers. At most one of these should be set.
        if (protect & PAGE_GUARD) {
          path.AppendLiteral("+guard");
        }
        if (protect & PAGE_NOCACHE) {
          path.AppendLiteral("+nocache");
        }
        if (protect & PAGE_WRITECOMBINE) {
          path.AppendLiteral("+writecombine");
        }
      }

      // Annotate likely stack segments, too.
      if (entry->mKind.mIsStack) {
        path.AppendLiteral("+stack");
      }

      // Append the segment count.
      path.AppendPrintf("(segments=%u)", entry->mCount);
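      // The assembled path looks like, e.g. (values illustrative),
      // "address-space/commit/private/readwrite+stack(segments=7)".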
      aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
                              entry->mSize,
                              "From MEMORY_BASIC_INFORMATION."_ns, aData);
    }

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)

#endif  // XP_<PLATFORM>

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
  ~VsizeMaxContiguousReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
          "Size of the maximum contiguous block of available virtual memory.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif
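
// A note on MOZ_COLLECT_REPORT (from nsIMemoryReporter.h), used throughout
// the reporters in this file: it is shorthand that forwards to the handler,
// roughly equivalent to
//   aHandleReport->Callback(""_ns, <path>_ns, <kind>, <units>, <amount>,
//                           <description>_ns, aData);
// so the reporters below differ only in the path, kind, units, amount, and
// description they pass.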

#ifdef HAVE_PRIVATE_REPORTER
class PrivateReporter final : public nsIMemoryReporter {
  ~PrivateReporter() {}

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "private", KIND_OTHER, UNITS_BYTES, amount,
          "Memory that cannot be shared with other processes, including memory that is "
          "committed and marked MEM_PRIVATE, data that is not mapped, and executable "
          "pages that have been written to.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
class VsizeReporter final : public nsIMemoryReporter {
  ~VsizeReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "vsize", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process, including code and data segments, the heap, "
          "thread stacks, memory explicitly mapped by the process via mmap and similar "
          "operations, and memory shared with other processes. This is the vsize figure "
          "as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
          "processes share huge amounts of memory with one another. But even on other "
          "operating systems, 'resident' is a much better measure of the memory "
          "resources used by the process.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)

class ResidentReporter final : public nsIMemoryReporter {
  ~ResidentReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "resident", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process that is present in physical memory, also known "
          "as the resident set size (RSS). This is the best single figure to use when "
          "considering the memory resources used by the process, but it depends both on "
          "other processes being run and details of the OS kernel and so is best used "
          "for comparing the memory usage of a single process at different points in "
          "time.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)

#endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
class ResidentUniqueReporter final : public nsIMemoryReporter {
  ~ResidentUniqueReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    // clang-format off
    if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
          "Memory mapped by the process that is present in physical memory and not "
          "shared with any other processes. This is also known as the process's unique "
          "set size (USS). This is the amount of RAM we'd expect to be freed if we "
          "closed this process.");
    }
#ifdef XP_MACOSX
    if (NS_SUCCEEDED(PhysicalFootprintAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-phys-footprint", KIND_OTHER, UNITS_BYTES, amount,
          "Memory footprint reported by MacOS's task_info API's phys_footprint field. "
          "This matches the memory column in Activity Monitor.");
    }
#endif
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)

#endif  // HAVE_RESIDENT_UNIQUE_REPORTER

#ifdef HAVE_SYSTEM_HEAP_REPORTER

class SystemHeapReporter final : public nsIMemoryReporter {
  ~SystemHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount;
    if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
          "Memory used by the system allocator that is currently allocated to the "
          "application. This is distinct from the jemalloc heap that Firefox uses for "
          "most or all of its heap allocations. Ideally this number is zero, but "
          "on some platforms we cannot force every heap allocation through jemalloc.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
#endif  // HAVE_SYSTEM_HEAP_REPORTER

#ifdef XP_UNIX

#  include <sys/resource.h>

#  define HAVE_RESIDENT_PEAK_REPORTER 1

[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
  struct rusage usage;
  if (0 == getrusage(RUSAGE_SELF, &usage)) {
    // The units for ru_maxrss:
    // - Mac: bytes
    // - Solaris: pages? But some sources say it actually always returns 0,
    //   so check for that
    // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
#  ifdef XP_MACOSX
    *aN = usage.ru_maxrss;
#  elif defined(SOLARIS)
    *aN = usage.ru_maxrss * getpagesize();
#  else
    *aN = usage.ru_maxrss * 1024;
#  endif
    if (*aN > 0) {
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}

class ResidentPeakReporter final : public nsIMemoryReporter {
  ~ResidentPeakReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
          "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
          "The peak 'resident' value for the lifetime of the process.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)

#  define HAVE_PAGE_FAULT_REPORTERS 1

class PageFaultsSoftReporter final : public nsIMemoryReporter {
  ~PageFaultsSoftReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    struct rusage usage;
    int err = getrusage(RUSAGE_SELF, &usage);
    if (err == 0) {
      int64_t amount = usage.ru_minflt;
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
          "The number of soft page faults (also known as 'minor page faults') that "
          "have occurred since the process started. A soft page fault occurs when the "
          "process tries to access a page which is present in physical memory but is "
          "not mapped into the process's address space. For instance, a process might "
          "observe soft page faults when it loads a shared library which is already "
          "present in physical memory. A process may experience many thousands of soft "
          "page faults even when the machine has plenty of available physical memory, "
          "and because the OS services a soft page fault without accessing the disk, "
          "they impact performance much less than hard page faults.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)

[[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
    int64_t* aAmount) {
  struct rusage usage;
  int err = getrusage(RUSAGE_SELF, &usage);
  if (err != 0) {
    return NS_ERROR_FAILURE;
  }
  *aAmount = usage.ru_majflt;
  return NS_OK;
}

class PageFaultsHardReporter final : public nsIMemoryReporter {
  ~PageFaultsHardReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t amount = 0;
    if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
      // clang-format off
      MOZ_COLLECT_REPORT(
          "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
          "The number of hard page faults (also known as 'major page faults') that have "
          "occurred since the process started. A hard page fault occurs when a process "
          "tries to access a page which is not present in physical memory. The "
          "operating system must access the disk in order to fulfill a hard page fault. "
          "When memory is plentiful, you should see very few hard page faults. But if "
          "the process tries to use more memory than your machine has available, you "
          "may see many thousands of hard page faults. Because accessing the disk is up "
          "to a million times slower than accessing RAM, the program may run very "
          "slowly when it is experiencing more than 100 or so hard page faults a "
          "second.");
      // clang-format on
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)

#endif  // XP_UNIX

/*
** memory reporter implementation for jemalloc and OSX malloc,
** to obtain info on total memory in use (that we know about,
** at least -- on OSX, there are sometimes other zones in use).
*/

#ifdef HAVE_JEMALLOC_STATS

static size_t HeapOverhead(const jemalloc_stats_t& aStats) {
  return aStats.waste + aStats.bookkeeping + aStats.pages_dirty +
         aStats.bin_unused;
}

// This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of
// the 100x for the percentage.
//
// static
int64_t nsMemoryReporterManager::HeapOverheadFraction(
    const jemalloc_stats_t& aStats) {
  size_t heapOverhead = HeapOverhead(aStats);
  size_t heapCommitted = aStats.allocated + heapOverhead;
  return int64_t(10000 * (heapOverhead / (double)heapCommitted));
}
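
// Worked example (illustrative numbers): with 25 MiB of overhead on top of
// 475 MiB allocated, heapCommitted is 500 MiB and this returns
// 10000 * (25 / 500.0) = 500, which UNITS_PERCENTAGE renders as 5%.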

class JemallocHeapReporter final : public nsIMemoryReporter {
  ~JemallocHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    jemalloc_stats_t stats;
    const size_t num_bins = jemalloc_stats_num_bins();
    nsTArray<jemalloc_bin_stats_t> bin_stats(num_bins);
    bin_stats.SetLength(num_bins);
    jemalloc_stats(&stats, bin_stats.Elements());

    // clang-format off
    MOZ_COLLECT_REPORT(
        "heap/committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
        "Memory mapped by the heap allocator that is currently allocated to the "
        "application. This may exceed the amount of memory requested by the "
        "application because the allocator regularly rounds up request sizes. (The "
        "exact amount requested is not recorded.)");

    MOZ_COLLECT_REPORT(
        "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
        "The same as 'heap/committed/allocated'.");

    // We mark this and the other heap/committed/overhead reporters as
    // KIND_NONHEAP because KIND_HEAP memory means "counted in
    // heap-allocated", which this is not.
    for (auto& bin : bin_stats) {
      MOZ_ASSERT(bin.size);
      nsPrintfCString path("heap/committed/bin-unused/bin-%zu", bin.size);
      aHandleReport->Callback(
          EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES, bin.bytes_unused,
          nsLiteralCString(
              "Unused bytes in all runs of all bins for this size class."),
          aData);
    }

    if (stats.waste > 0) {
      MOZ_COLLECT_REPORT(
          "heap/committed/waste", KIND_NONHEAP, UNITS_BYTES, stats.waste,
          "Committed bytes which do not correspond to an active allocation and "
          "which the allocator is not intentionally keeping alive (i.e., not "
          "'heap/{bookkeeping,unused-pages,bin-unused}').");
    }

    MOZ_COLLECT_REPORT(
        "heap/committed/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
        stats.bookkeeping,
        "Committed bytes which the heap allocator uses for internal data "
        "structures.");

    MOZ_COLLECT_REPORT(
        "heap/committed/unused-pages/dirty", KIND_NONHEAP, UNITS_BYTES,
        stats.pages_dirty,
        "Memory which the allocator could return to the operating system, but "
        "hasn't. The allocator keeps this memory around as an optimization, so it "
        "doesn't have to ask the OS the next time it needs to fulfill a request. "
        "This value is typically not larger than a few megabytes.");

    MOZ_COLLECT_REPORT(
        "heap/decommitted/unused-pages/fresh", KIND_OTHER, UNITS_BYTES,
        stats.pages_fresh,
        "Amount of memory currently mapped but never used.");
    // A duplicate entry in the decommitted part of the tree.
    MOZ_COLLECT_REPORT(
        "decommitted/heap/unused-pages/fresh", KIND_OTHER, UNITS_BYTES,
        stats.pages_fresh,
        "Amount of memory currently mapped but never used.");

    // On MacOS madvised memory is still counted in the resident set until the
    // OS actually decommits it.
#ifdef XP_MACOSX
#  define MADVISED_GROUP "committed"
#else
#  define MADVISED_GROUP "decommitted"
#endif
    MOZ_COLLECT_REPORT(
        "heap/" MADVISED_GROUP "/unused-pages/madvised", KIND_OTHER,
        UNITS_BYTES, stats.pages_madvised,
        "Amount of memory currently mapped and unused, which the OS should "
        "remove from the application's resident set.");
    // A duplicate entry in the decommitted part of the tree.
    MOZ_COLLECT_REPORT(
        "decommitted/heap/unused-pages/madvised", KIND_OTHER, UNITS_BYTES,
        stats.pages_madvised,
        "Amount of memory currently mapped and unused, which the OS should "
        "remove from the application's resident set.");

    size_t decommitted = stats.mapped - stats.allocated - stats.waste -
                         stats.pages_dirty - stats.pages_fresh -
                         stats.bookkeeping - stats.bin_unused;
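    // Rearranged, this is the identity the tree relies on:
    //   mapped = allocated + waste + pages_dirty + pages_fresh
    //            + bookkeeping + bin_unused + decommitted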
    MOZ_COLLECT_REPORT(
        "heap/decommitted/unmapped", KIND_OTHER, UNITS_BYTES, decommitted,
        "Amount of memory currently mapped but not committed, "
        "neither in physical memory nor paged to disk.");
    MOZ_COLLECT_REPORT(
        "decommitted/heap/decommitted", KIND_OTHER, UNITS_BYTES, decommitted,
        "Amount of memory currently mapped but not committed, "
        "neither in physical memory nor paged to disk.");

    MOZ_COLLECT_REPORT(
        "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
        "Size of chunks.");

#ifdef MOZ_PHC
    mozilla::phc::MemoryUsage usage;
    mozilla::phc::PHCMemoryUsage(usage);

    MOZ_COLLECT_REPORT(
        "explicit/phc/metadata", KIND_NONHEAP, UNITS_BYTES,
        usage.mMetadataBytes,
        "Memory used by PHC to store stacks and other metadata for each "
        "allocation.");
    MOZ_COLLECT_REPORT(
        "explicit/phc/fragmentation", KIND_NONHEAP, UNITS_BYTES,
        usage.mFragmentationBytes,
        "The amount of memory lost due to rounding up allocations to the next "
        "page size. This is also known as 'internal fragmentation'. Note that "
        "all allocators have some internal fragmentation; there may still be "
        "some internal fragmentation without PHC.");
#endif
    // clang-format on

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)

#endif  // HAVE_JEMALLOC_STATS

// Why is this here? At first glance, you'd think it could be defined and
// registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
// However, the obvious time to register it is when the table is initialized,
// and that happens before XPCOM components are initialized, which means the
// RegisterStrongMemoryReporter call fails. So instead we do it here.
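// (For reporters without this timing problem, registration is simply a call
// such as RegisterStrongMemoryReporter(new MyReporter()), where MyReporter
// is a hypothetical nsIMemoryReporter implementation.)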
class AtomTablesReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~AtomTablesReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    AtomsSizes sizes;
    NS_AddSizeOfAtoms(MallocSizeOf, sizes);

    MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
                       sizes.mTable, "Memory used by the atom table.");

    MOZ_COLLECT_REPORT(
        "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
        sizes.mDynamicAtoms,
        "Memory used by dynamic atom objects and chars (which are stored "
        "at the end of each atom object).");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)

class ThreadsReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
  ~ThreadsReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
#ifdef XP_LINUX
    nsTArray<MemoryMapping> mappings(1024);
    MOZ_TRY(GetMemoryMappings(mappings));
#endif

    // Enumerating over active threads requires holding a lock, so we collect
    // info on all threads, and then call our reporter callbacks after
    // releasing the lock.
    struct ThreadData {
      nsCString mName;
      uint32_t mThreadId;
      size_t mPrivateSize;
    };
    AutoTArray<ThreadData, 32> threads;

    size_t eventQueueSizes = 0;
    size_t wrapperSizes = 0;
    size_t threadCount = 0;

    {
      nsThreadManager& tm = nsThreadManager::get();
      OffTheBooksMutexAutoLock lock(tm.ThreadListMutex());
      for (auto* thread : tm.ThreadList()) {
        threadCount++;
        eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
        wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);

        if (!thread->StackBase()) {
          continue;
        }

#if defined(XP_LINUX)
        int idx = mappings.BinaryIndexOf(thread->StackBase());
        if (idx < 0) {
          continue;
        }
        // Referenced() is the combined size of all pages in the region which
        // have ever been touched, and are therefore consuming memory. For
        // stack regions, these pages are guaranteed to be un-shared unless we
        // fork after creating threads (which we don't).
        size_t privateSize = mappings[idx].Referenced();

        // On Linux, we have to be very careful matching memory regions to
        // thread stacks.
        //
        // To begin with, the kernel only reports VM stats for regions of all
        // adjacent pages with the same flags, protection, and backing file.
        // There's no way to get finer-grained usage information for a subset
        // of those pages.
        //
        // Stack segments always have a guard page at the bottom of the stack
        // (assuming we only support stacks that grow down), so there's no
        // danger of them being merged with other stack regions. At the top,
        // there's no protection page, and no way to allocate one without
        // using pthreads directly and allocating our own stacks. So we get
        // around the problem by adding an extra VM flag (NOHUGEPAGES) to our
        // stack region, which we don't expect to be set on any heap regions.
        // But this is not fool-proof.
        //
        // A second kink is that different C libraries (and different versions
        // thereof) report stack base locations and sizes differently with
        // regard to the guard page. For the libraries that include the guard
        // page in the reported stack size or base pointer, we need to adjust
        // those values to compensate. But it's possible that our logic will
        // get out of sync with library changes, or someone will compile with
        // an unexpected library.
        //
        // The upshot of all of this is that there may be configurations that
        // our special cases don't cover. And if there are, we want to know
        // about it. So assert that the total size of the memory region we're
        // reporting actually matches the allocated size of the thread stack.
#  ifndef ANDROID
        MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
                   "Mapping region size doesn't match stack allocation size");
#  endif
#elif defined(XP_WIN)
        auto memInfo =
            MemoryInfo::Get(thread->StackBase(), thread->StackSize());
        size_t privateSize = memInfo.Committed();
#else
        size_t privateSize = thread->StackSize();
        MOZ_ASSERT_UNREACHABLE(
            "Shouldn't have stack base pointer on this platform");
#endif

        nsCString threadName;
        thread->GetThreadName(threadName);
        threads.AppendElement(ThreadData{
            std::move(threadName),
            thread->ThreadId(),
            // On Linux, it's possible (but unlikely) that our stack region
            // will have been merged with adjacent heap regions, in which case
            // we'll get combined size information for both. So we take the
            // minimum of the reported private size and the requested stack
            // size to avoid the possibility of majorly over-reporting in that
            // case.
            std::min(privateSize, thread->StackSize()),
        });
      }
    }

    for (auto& thread : threads) {
      nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
                           thread.mName.get(), thread.mThreadId);

      aHandleReport->Callback(
          ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
          nsLiteralCString("The sizes of thread stacks which have been "
                           "committed to memory."),
          aData);
    }

    MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
                       UNITS_BYTES, eventQueueSizes,
                       "The sizes of nsThread event queues and observers.");

    MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
                       UNITS_BYTES, wrapperSizes,
                       "The sizes of nsThread/PRThread wrappers.");

#if defined(XP_WIN)
    // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
    // that's 12K. For 64 bit, it's 24K.
    //
    // See
    // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
    constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
    // On Linux, kernel stacks are usually 8K. However, on x86, they are
    // allocated virtually, and start out at 4K. They may grow to 8K, but we
    // have no way of knowing which ones do, so all we can do is guess.
#  if defined(__x86_64__) || defined(__i386__)
    constexpr size_t kKernelSize = 4 * 1024;
#  else
    constexpr size_t kKernelSize = 8 * 1024;
#  endif
#elif defined(XP_MACOSX)
    // On Darwin, kernel stacks are 16K:
    //
    // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
    constexpr size_t kKernelSize = 16 * 1024;
#else
    // Elsewhere, just assume that kernel stacks require at least 8K.
    constexpr size_t kKernelSize = 8 * 1024;
#endif

    MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
                       UNITS_BYTES, threadCount * kKernelSize,
                       "The total kernel overhead for all active threads.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)

#ifdef DEBUG

// Ideally, this would be implemented in BlockingResourceBase.cpp.
// However, this ends up breaking the linking step of various unit tests due
// to adding a new dependency to libdmd for a commonly used feature (mutexes)
// in DMD builds. So instead we do it here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~DeadlockDetectorReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    MOZ_COLLECT_REPORT(
        "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
        BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
        "Memory used by the deadlock detector.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)

#endif

#ifdef MOZ_DMD

namespace mozilla {
namespace dmd {

class DMDReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUsed,
        "Memory used by stack traces which correspond to at least "
        "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
        "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
        sizes.mStackTracesUnused,
        "Memory used by stack traces which don't correspond to any heap "
        "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
                       UNITS_BYTES, sizes.mStackTraceTable,
                       "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
                       sizes.mLiveBlockTable,
                       "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
                       sizes.mDeadBlockTable,
                       "Memory used by DMD's dead block list.");

    return NS_OK;
  }

 private:
  ~DMDReporter() = default;
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)

}  // namespace dmd
}  // namespace mozilla

#endif  // MOZ_DMD

#ifdef MOZ_WIDGET_ANDROID
class AndroidMemoryReporter final : public nsIMemoryReporter {
 public:
  NS_DECL_ISUPPORTS

  AndroidMemoryReporter() = default;

  NS_IMETHOD
  CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData,
                 bool aAnonymize) override {
    if (!jni::IsAvailable() || jni::GetAPIVersion() < 23) {
      return NS_OK;
    }

    int32_t heap = java::GeckoAppShell::GetMemoryUsage("summary.java-heap"_ns);
    if (heap > 0) {
      MOZ_COLLECT_REPORT("java-heap", KIND_OTHER, UNITS_BYTES, heap * 1024,
                         "The private Java Heap usage");
    }
    return NS_OK;
  }

 private:
  ~AndroidMemoryReporter() = default;
};
NS_IMPL_ISUPPORTS(AndroidMemoryReporter, nsIMemoryReporter)
#endif

/*
** nsMemoryReporterManager implementation
*/

NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
                  nsIMemoryReporter)

NS_IMETHODIMP
nsMemoryReporterManager::Init() {
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Under normal circumstances this function is only called once. However,
  // we've (infrequently) seen memory report dumps in crash reports that
  // suggest that this function is sometimes called multiple times. That in
  // turn means that multiple reporters of each kind are registered, which
  // leads to duplicated reports of individual measurements such as
  // "resident", "vsize", etc.
  //
  // It's unclear how these multiple calls can occur. The only plausible
  // theory so far is badly-written extensions, because this function is
  // callable from JS code via nsIMemoryReporter.idl.
  //
  // Whatever the cause, it's a bad thing. So we protect against it with the
  // following check.
  static bool isInited = false;
  if (isInited) {
    NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
    return NS_OK;
  }
  isInited = true;

  // Add a series of low-level reporters meant to be executed in order and
  // before any other reporters. These reporters are never released until
  // the manager dies (at process shutdown). Note that this currently only
  // works for reporters expecting to be executed synchronously.
  //
  // Note that we explicitly handle our self-reporting inside
  // GetReportsForThisProcessExtended, such that we do not need to register
  // ourselves in any array/table here.
  {
    mozilla::MutexAutoLock autoLock(mMutex);

#ifdef HAVE_JEMALLOC_STATS
    mStrongEternalReporters->AppendElement(new JemallocHeapReporter());
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
    mStrongEternalReporters->AppendElement(new VsizeReporter());
    mStrongEternalReporters->AppendElement(new ResidentReporter());
#endif

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
    mStrongEternalReporters->AppendElement(new VsizeMaxContiguousReporter());
#endif

#ifdef HAVE_RESIDENT_PEAK_REPORTER
    mStrongEternalReporters->AppendElement(new ResidentPeakReporter());
#endif

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
    mStrongEternalReporters->AppendElement(new ResidentUniqueReporter());
#endif

#ifdef HAVE_PAGE_FAULT_REPORTERS
    mStrongEternalReporters->AppendElement(new PageFaultsSoftReporter());
    mStrongEternalReporters->AppendElement(new PageFaultsHardReporter());
#endif

#ifdef HAVE_PRIVATE_REPORTER
    mStrongEternalReporters->AppendElement(new PrivateReporter());
#endif

#ifdef HAVE_SYSTEM_HEAP_REPORTER
    mStrongEternalReporters->AppendElement(new SystemHeapReporter());
#endif

    mStrongEternalReporters->AppendElement(new AtomTablesReporter());

    mStrongEternalReporters->AppendElement(new ThreadsReporter());

#ifdef DEBUG
    mStrongEternalReporters->AppendElement(new DeadlockDetectorReporter());
#endif

#ifdef MOZ_GECKO_PROFILER
    // We have to register this here rather than in profiler_init() because
    // profiler_init() runs prior to nsMemoryReporterManager's creation.
    mStrongEternalReporters->AppendElement(new GeckoProfilerReporter());
#endif

#ifdef MOZ_DMD
    mStrongEternalReporters->AppendElement(new mozilla::dmd::DMDReporter());
#endif

#ifdef XP_WIN
    mStrongEternalReporters->AppendElement(new WindowsAddressSpaceReporter());
#endif

#ifdef MOZ_WIDGET_ANDROID
    mStrongEternalReporters->AppendElement(new AndroidMemoryReporter());
#endif
  }  // autoLock(mMutex);

#ifdef XP_UNIX
  nsMemoryInfoDumper::Initialize();
#endif

  return NS_OK;
}
nsMemoryReporterManager::nsMemoryReporterManager()
    : mMutex("nsMemoryReporterManager::mMutex"),
      mIsRegistrationBlocked(false),
      mStrongEternalReporters(new StrongReportersArray()),
      mStrongReporters(new StrongReportersTable()),
      mWeakReporters(new WeakReportersTable()),
      mSavedStrongEternalReporters(nullptr),
      mSavedStrongReporters(nullptr),
      mSavedWeakReporters(nullptr),
      mNextGeneration(1),
      mPendingProcessesState(nullptr),
      mPendingReportersState(nullptr)
#ifdef HAVE_JEMALLOC_STATS
      ,
      mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
#endif
{
}
nsMemoryReporterManager::~nsMemoryReporterManager() {
  NS_ASSERTION(!mSavedStrongEternalReporters,
               "failed to restore eternal reporters");
  NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
  NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
}
NS_IMETHODIMP
nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
                                        nsISupports* aData, bool aAnonymize) {
  size_t n = MallocSizeOf(this);
  {
    mozilla::MutexAutoLock autoLock(mMutex);
    n += mStrongEternalReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
    n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
    n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
    // Note that we do not include the mSaved<X>Reporters here, as during
    // normal operations they are always nullptr and during testing we want to
    // hide the saved variants entirely.
  }

  MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
                     n, "Memory used by the memory reporter infrastructure.");

  return NS_OK;
}
#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
#  define MEMORY_REPORTING_LOG(format, ...) \
    printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
#  define MEMORY_REPORTING_LOG(...)
#endif
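// With DEBUG_CHILD_PROCESS_MEMORY_REPORTING defined, the MEMORY_REPORTING_LOG
// calls below print stderr lines of the form:
//
//   ++++ MEMORY REPORTING: GetReports (gen=1)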
NS_IMETHODIMP
nsMemoryReporterManager::GetReports(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize) {
  return GetReportsExtended(aHandleReport, aHandleReportData, aFinishReporting,
                            aFinishReportingData, aAnonymize,
                            /* minimize = */ false,
                            /* DMDident = */ u""_ns);
}
NS_IMETHODIMP
nsMemoryReporterManager::GetReportsExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, bool aAnonymize, bool aMinimize,
    const nsAString& aDMDDumpIdent) {
  nsresult rv;

  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  uint32_t generation = mNextGeneration++;

  if (mPendingProcessesState) {
    // A request is in flight. Don't start another one. And don't report
    // an error; just ignore it, and let the in-flight request finish.
    MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
                         mPendingProcessesState->mGeneration);
    return NS_OK;
  }

  MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);

  uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
  MOZ_ASSERT(concurrency >= 1);
  if (concurrency < 1) {
    concurrency = 1;
  }
  mPendingProcessesState = new PendingProcessesState(
      generation, aAnonymize, aMinimize, concurrency, aHandleReport,
      aHandleReportData, aFinishReporting, aFinishReportingData, aDMDDumpIdent);

  if (aMinimize) {
    nsCOMPtr<nsIRunnable> callback =
        NewRunnableMethod("nsMemoryReporterManager::StartGettingReports", this,
                          &nsMemoryReporterManager::StartGettingReports);
    rv = MinimizeMemoryUsage(callback);
  } else {
    rv = StartGettingReports();
  }
  return rv;
}
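// The overall flow of one report request, as implemented below, is roughly:
//
//   GetReports() / GetReportsExtended()
//     -> StartGettingReports()
//          -> GetReportsForThisProcessExtended()  (this process, async)
//          -> StartChildReport() per child, up to the concurrency limit
//     -> EndProcessReport() as each process finishes (or TimeoutCallback()
//        if children take too long)
//     -> FinishReporting()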
// MainThread only
nsresult nsMemoryReporterManager::StartGettingReports() {
  PendingProcessesState* s = mPendingProcessesState;
  nsresult rv;

  // Get reports for this process.
  FILE* parentDMDFile = nullptr;
#ifdef MOZ_DMD
  if (!s->mDMDDumpIdent.IsEmpty()) {
    rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
                                         &parentDMDFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      parentDMDFile = nullptr;
    }
  }
#endif

  // This is async.
  GetReportsForThisProcessExtended(
      s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
      s->mFinishReporting, s->mFinishReportingData);

  nsTArray<dom::ContentParent*> childWeakRefs;
  dom::ContentParent::GetAll(childWeakRefs);
  if (!childWeakRefs.IsEmpty()) {
    // Request memory reports from child processes. This happens
    // after the parent report so that the parent's main thread will
    // be free to process the child reports, instead of causing them
    // to be buffered and consume (possibly scarce) memory.

    for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
      s->mChildrenPending.AppendElement(childWeakRefs[i]);
    }
  }

  if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (RDDProcessManager* rdd = RDDProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (gfx::VRProcessManager* vr = gfx::VRProcessManager::Get()) {
    if (RefPtr<MemoryReportingProcess> proc = vr->GetProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!IsRegistrationBlocked() && net::gIOService) {
    if (RefPtr<MemoryReportingProcess> proc =
            net::gIOService->GetSocketProcessMemoryReporter()) {
      s->mChildrenPending.AppendElement(proc.forget());
    }
  }

  if (!IsRegistrationBlocked()) {
    if (RefPtr<UtilityProcessManager> utility =
            UtilityProcessManager::GetIfExists()) {
      for (RefPtr<UtilityProcessParent>& parent :
           utility->GetAllProcessesProcessParent()) {
        if (RefPtr<MemoryReportingProcess> proc =
                utility->GetProcessMemoryReporter(parent)) {
          s->mChildrenPending.AppendElement(proc.forget());
        }
      }
    }
  }

  if (!s->mChildrenPending.IsEmpty()) {
    nsCOMPtr<nsITimer> timer;
    rv = NS_NewTimerWithFuncCallback(
        getter_AddRefs(timer), TimeoutCallback, this, kTimeoutLengthMS,
        nsITimer::TYPE_ONE_SHOT,
        "nsMemoryReporterManager::StartGettingReports");
    if (NS_WARN_IF(NS_FAILED(rv))) {
      FinishReporting();
      return rv;
    }

    MOZ_ASSERT(!s->mTimer);
    s->mTimer.swap(timer);
  }

  return NS_OK;
}
void nsMemoryReporterManager::DispatchReporter(
    nsIMemoryReporter* aReporter, bool aIsAsync,
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize) {
  MOZ_ASSERT(mPendingReportersState);

  // Grab refs to everything used in the lambda function.
  RefPtr<nsMemoryReporterManager> self = this;
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
  nsCOMPtr<nsISupports> handleReportData = aHandleReportData;

  nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
      "nsMemoryReporterManager::DispatchReporter",
      [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
        reporter->CollectReports(handleReport, handleReportData, aAnonymize);
        if (!aIsAsync) {
          self->EndReport();
        }
      });

  NS_DispatchToMainThread(event);
  mPendingReportersState->mReportsPending++;
}
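// Note that the lambda above only calls EndReport() for sync reporters.
// An async reporter is presumably responsible for notifying the manager
// itself (EndReport() is a scriptable method on this manager) once its last
// report has been delivered; otherwise mReportsPending would never reach
// zero and the report would never finish.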
NS_IMETHODIMP
nsMemoryReporterManager::GetReportsForThisProcessExtended(
    nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
    bool aAnonymize, FILE* aDMDFile,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData) {
  // Memory reporters are not necessarily threadsafe, so this function must
  // be called from the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  if (NS_WARN_IF(mPendingReportersState)) {
    // Report is already in progress.
    return NS_ERROR_IN_PROGRESS;
  }

#ifdef MOZ_DMD
  if (aDMDFile) {
    // Clear DMD's reportedness state before running the memory
    // reporters, to avoid spurious twice-reported warnings.
    dmd::ClearReports();
  }
#else
  MOZ_ASSERT(!aDMDFile);
#endif

  mPendingReportersState = new PendingReportersState(
      aFinishReporting, aFinishReportingData, aDMDFile);

  {
    mozilla::MutexAutoLock autoLock(mMutex);

    // We process our own, most sensible reporters before anyone else to avoid
    // them measuring changes caused by other reporters' dynamic structures.
    // Note that all eternal reporters need to be sync, too.
    for (const auto& entry : *mStrongEternalReporters) {
      DispatchReporter(entry, false, aHandleReport, aHandleReportData,
                       aAnonymize);
    }
    // Process our self-reporting (not in any array/table). Note that when
    // we test, we expect to execute only reporters in the swapped-in tables.
    if (!mIsRegistrationBlocked) {
      DispatchReporter(this, false, aHandleReport, aHandleReportData,
                       aAnonymize);
    }

    // Now process additional reporters. Note that these are executed in an
    // unforeseeable order (due to the hashtables being keyed on pointers).
    for (const auto& entry : *mStrongReporters) {
      DispatchReporter(entry.GetKey(), entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }
    for (const auto& entry : *mWeakReporters) {
      nsCOMPtr<nsIMemoryReporter> reporter = entry.GetKey();
      DispatchReporter(reporter, entry.GetData(), aHandleReport,
                       aHandleReportData, aAnonymize);
    }
  }

  return NS_OK;
}
// MainThread only
NS_IMETHODIMP
nsMemoryReporterManager::EndReport() {
  if (--mPendingReportersState->mReportsPending == 0) {
#ifdef MOZ_DMD
    if (mPendingReportersState->mDMDFile) {
      nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
    }
#endif
    if (mPendingProcessesState) {
      // This is the parent process.
      EndProcessReport(mPendingProcessesState->mGeneration, true);
    } else {
      mPendingReportersState->mFinishReporting->Callback(
          mPendingReportersState->mFinishReportingData);
    }

    delete mPendingReportersState;
    mPendingReportersState = nullptr;
  }

  return NS_OK;
}
nsMemoryReporterManager::PendingProcessesState*
nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration) {
  // Memory reporting only happens on the main thread.
  MOZ_RELEASE_ASSERT(NS_IsMainThread());

  PendingProcessesState* s = mPendingProcessesState;

  if (!s) {
    // If we reach here, then:
    //
    // - A child process reported back too late, and no subsequent request
    //   is in flight.
    //
    // So there's nothing to be done. Just ignore it.
    MEMORY_REPORTING_LOG("HandleChildReports: no request in flight (aGen=%u)\n",
                         aGeneration);
    return nullptr;
  }

  if (aGeneration != s->mGeneration) {
    // If we reach here, a child process must have reported back, too late,
    // while a subsequent (higher-numbered) request is in flight. Again,
    // ignore it.
    MOZ_ASSERT(aGeneration < s->mGeneration);
    MEMORY_REPORTING_LOG(
        "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n", aGeneration,
        s->mGeneration);
    return nullptr;
  }

  return s;
}
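// For example: if a child from generation 3 reports back while generation 4
// is already in flight, the generation check above returns nullptr and the
// stale report is silently dropped by the caller.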
// This function has no return value. If something goes wrong, there's no
// clear place to report the problem to, but that's ok -- we will end up
// hitting the timeout and executing TimeoutCallback().
void nsMemoryReporterManager::HandleChildReport(
    uint32_t aGeneration, const dom::MemoryReport& aChildReport) {
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
  if (!s) {
    return;
  }

  // Child reports should have a non-empty process.
  MOZ_ASSERT(!aChildReport.process().IsEmpty());

  // If the call fails, ignore and continue.
  s->mHandleReport->Callback(aChildReport.process(), aChildReport.path(),
                             aChildReport.kind(), aChildReport.units(),
                             aChildReport.amount(), aChildReport.desc(),
                             s->mHandleReportData);
}
/* static */
bool nsMemoryReporterManager::StartChildReport(
    mozilla::MemoryReportingProcess* aChild,
    const PendingProcessesState* aState) {
  if (!aChild->IsAlive()) {
    MEMORY_REPORTING_LOG(
        "StartChildReports (gen=%u): child exited before"
        " its report was started\n",
        aState->mGeneration);
    return false;
  }

  Maybe<mozilla::ipc::FileDescriptor> dmdFileDesc;
#ifdef MOZ_DMD
  if (!aState->mDMDDumpIdent.IsEmpty()) {
    FILE* dmdFile = nullptr;
    nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
                                                  aChild->Pid(), &dmdFile);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      // Proceed with the memory report as if DMD were disabled.
      dmdFile = nullptr;
    }
    if (dmdFile) {
      dmdFileDesc = Some(mozilla::ipc::FILEToFileDescriptor(dmdFile));
      fclose(dmdFile);
    }
  }
#endif
  return aChild->SendRequestMemoryReport(
      aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
}
void nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration,
                                               bool aSuccess) {
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
  if (!s) {
    return;
  }

  MOZ_ASSERT(s->mNumProcessesRunning > 0);
  s->mNumProcessesRunning--;
  s->mNumProcessesCompleted++;
  MEMORY_REPORTING_LOG(
      "HandleChildReports (aGen=%u): process %u %s"
      " (%u running, %u pending)\n",
      aGeneration, s->mNumProcessesCompleted,
      aSuccess ? "completed" : "exited during report", s->mNumProcessesRunning,
      static_cast<unsigned>(s->mChildrenPending.Length()));

  // Start pending children up to the concurrency limit.
  while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
         !s->mChildrenPending.IsEmpty()) {
    // Pop the last element from s->mChildrenPending.
    const RefPtr<MemoryReportingProcess> nextChild =
        s->mChildrenPending.PopLastElement();
    // Start its report (if the child is still alive).
    if (StartChildReport(nextChild, s)) {
      ++s->mNumProcessesRunning;
      MEMORY_REPORTING_LOG(
          "HandleChildReports (aGen=%u): started child report"
          " (%u running, %u pending)\n",
          aGeneration, s->mNumProcessesRunning,
          static_cast<unsigned>(s->mChildrenPending.Length()));
    }
  }

  // If all the child processes (if any) have reported, we can cancel
  // the timer (if started) and finish up. Otherwise, just return.
  if (s->mNumProcessesRunning == 0) {
    MOZ_ASSERT(s->mChildrenPending.IsEmpty());
    if (s->mTimer) {
      s->mTimer->Cancel();
    }
    FinishReporting();
  }
}
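// With the default "memory.report_concurrency" value of 1 (see
// GetReportsExtended above), the loop above starts children strictly one at
// a time; a larger pref value lets that many children report concurrently.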
/* static */
void nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData) {
  nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
  PendingProcessesState* s = mgr->mPendingProcessesState;

  // Release assert because: if the pointer is null we're about to
  // crash regardless of DEBUG, and this way the compiler doesn't
  // complain about unused variables.
  MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
  MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
                       s->mGeneration, s->mNumProcessesRunning,
                       static_cast<unsigned>(s->mChildrenPending.Length()));

  // We don't bother sending any kind of cancellation message to the child
  // processes that haven't reported back.
  mgr->FinishReporting();
}
nsresult nsMemoryReporterManager::FinishReporting() {
  // Memory reporting only happens on the main thread.
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  MOZ_ASSERT(mPendingProcessesState);
  MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
                       mPendingProcessesState->mGeneration,
                       mPendingProcessesState->mNumProcessesCompleted);

  // Call this before deleting |mPendingProcessesState|. That way, if
  // |mFinishReporting| calls GetReports(), it will silently abort, as
  // required.
  nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
      mPendingProcessesState->mFinishReportingData);

  delete mPendingProcessesState;
  mPendingProcessesState = nullptr;
  return rv;
}
nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
    uint32_t aGeneration, bool aAnonymize, bool aMinimize,
    uint32_t aConcurrencyLimit, nsIHandleReportCallback* aHandleReport,
    nsISupports* aHandleReportData,
    nsIFinishReportingCallback* aFinishReporting,
    nsISupports* aFinishReportingData, const nsAString& aDMDDumpIdent)
    : mGeneration(aGeneration),
      mAnonymize(aAnonymize),
      mMinimize(aMinimize),
      mNumProcessesRunning(1),  // reporting starts with the parent
      mNumProcessesCompleted(0),
      mConcurrencyLimit(aConcurrencyLimit),
      mHandleReport(aHandleReport),
      mHandleReportData(aHandleReportData),
      mFinishReporting(aFinishReporting),
      mFinishReportingData(aFinishReportingData),
      mDMDDumpIdent(aDMDDumpIdent) {}
static void CrashIfRefcountIsZero(nsISupports* aObj) {
  // This will probably crash if the object's refcount is 0.
  uint32_t refcnt = NS_ADDREF(aObj);
  if (refcnt <= 1) {
    MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
  }
  NS_RELEASE(aObj);
}
nsresult nsMemoryReporterManager::RegisterReporterHelper(
    nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  if (mIsRegistrationBlocked && !aForce) {
    return NS_ERROR_FAILURE;
  }

  if (mStrongReporters->Contains(aReporter) ||
      mWeakReporters->Contains(aReporter)) {
    return NS_ERROR_FAILURE;
  }

  // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
  // a kung fu death grip before calling InsertOrUpdate. Otherwise, if
  // InsertOrUpdate addref'ed and released |aReporter| before finally
  // addref'ing it for good, it would free |aReporter|! The kung fu death
  // grip could itself be problematic if InsertOrUpdate didn't addref
  // |aReporter| (because then when the death grip goes out of scope, we
  // would delete the reporter). In debug mode, we check that this doesn't
  // happen.
  //
  // If |aStrong| is false, we require that |aReporter| have a non-zero
  // refcnt.
  //
  if (aStrong) {
    nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
    mStrongReporters->InsertOrUpdate(aReporter, aIsAsync);
    CrashIfRefcountIsZero(aReporter);
  } else {
    CrashIfRefcountIsZero(aReporter);
    nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
    if (jsComponent) {
      // We cannot allow non-native reporters (WrappedJS), since we'll be
      // holding onto a raw pointer, which would point to the wrapper,
      // and that wrapper is likely to go away as soon as this register
      // call finishes. This would then lead to subsequent crashes in
      // CollectReports().
      return NS_ERROR_XPC_BAD_CONVERT_JS;
    }
    mWeakReporters->InsertOrUpdate(aReporter, aIsAsync);
  }

  return NS_OK;
}
NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ true,
                                /* async = */ false);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongAsyncReporter(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ true,
                                /* async = */ true);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ false,
                                /* async = */ false);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterWeakAsyncReporter(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ false,
                                /* strong = */ false,
                                /* async = */ true);
}

NS_IMETHODIMP
nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
    nsIMemoryReporter* aReporter) {
  return RegisterReporterHelper(aReporter, /* force = */ true,
                                /* strong = */ true,
                                /* async = */ false);
}
2411 nsMemoryReporterManager::UnregisterStrongReporter(
2412 nsIMemoryReporter* aReporter) {
2413 // This method is thread-safe.
2414 mozilla::MutexAutoLock autoLock(mMutex);
2416 MOZ_ASSERT(!mWeakReporters->Contains(aReporter));
2418 if (mStrongReporters->Contains(aReporter)) {
2419 mStrongReporters->Remove(aReporter);
2420 return NS_OK;
2423 // We don't register new reporters when the block is in place, but we do
2424 // unregister existing reporters. This is so we don't keep holding strong
2425 // references that these reporters aren't expecting (which can keep them
2426 // alive longer than intended).
2427 if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
2428 mSavedStrongReporters->Remove(aReporter);
2429 return NS_OK;
2432 return NS_ERROR_FAILURE;
NS_IMETHODIMP
nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter) {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);

  MOZ_ASSERT(!mStrongReporters->Contains(aReporter));

  if (mWeakReporters->Contains(aReporter)) {
    mWeakReporters->Remove(aReporter);
    return NS_OK;
  }

  // We don't register new reporters when the block is in place, but we do
  // unregister existing reporters. This is so we don't keep holding weak
  // references that the old reporters aren't expecting (which can end up as
  // dangling pointers that lead to use-after-frees).
  if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
    mSavedWeakReporters->Remove(aReporter);
    return NS_OK;
  }

  return NS_ERROR_FAILURE;
}
NS_IMETHODIMP
nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters() {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);
  if (mIsRegistrationBlocked) {
    return NS_ERROR_FAILURE;
  }
  mIsRegistrationBlocked = true;

  // Hide the existing reporters, saving them for later restoration.
  MOZ_ASSERT(!mSavedStrongEternalReporters);
  MOZ_ASSERT(!mSavedStrongReporters);
  MOZ_ASSERT(!mSavedWeakReporters);
  mSavedStrongEternalReporters.swap(mStrongEternalReporters);
  mSavedStrongReporters.swap(mStrongReporters);
  mSavedWeakReporters.swap(mWeakReporters);
  mStrongEternalReporters.reset(new StrongReportersArray());
  mStrongReporters.reset(new StrongReportersTable());
  mWeakReporters.reset(new WeakReportersTable());

  return NS_OK;
}

NS_IMETHODIMP
nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters() {
  // This method is thread-safe.
  mozilla::MutexAutoLock autoLock(mMutex);
  if (!mIsRegistrationBlocked) {
    return NS_ERROR_FAILURE;
  }

  // Banish the current reporters, and restore the hidden ones.
  mStrongEternalReporters = std::move(mSavedStrongEternalReporters);
  mStrongReporters = std::move(mSavedStrongReporters);
  mWeakReporters = std::move(mSavedWeakReporters);

  mIsRegistrationBlocked = false;
  return NS_OK;
}
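// The Block/Unblock pair above lets a caller (apparently test code; see the
// comments in CollectReports() and GetReportsForThisProcessExtended() about
// hiding the saved variants) swap in a clean set of reporter tables and
// later restore the originals.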
NS_IMETHODIMP
nsMemoryReporterManager::GetVsize(int64_t* aVsize) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return VsizeDistinguishedAmount(aVsize);
#else
  *aVsize = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount) {
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  return VsizeMaxContiguousDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResident(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentFast(int64_t* aAmount) {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  return ResidentFastDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

/*static*/
int64_t nsMemoryReporterManager::ResidentFast() {
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  int64_t amount;
  nsresult rv = ResidentFastDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  return ResidentPeakDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}

/*static*/
int64_t nsMemoryReporterManager::ResidentPeak() {
#ifdef HAVE_RESIDENT_PEAK_REPORTER
  int64_t amount = 0;
  nsresult rv = ResidentPeakDistinguishedAmount(&amount);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
#else
  return 0;
#endif
}

NS_IMETHODIMP
nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount) {
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  return ResidentUniqueDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
#ifdef XP_MACOSX
/*static*/
int64_t nsMemoryReporterManager::PhysicalFootprint(mach_port_t aPort) {
  int64_t amount = 0;
  nsresult rv = PhysicalFootprintAmount(&amount, aPort);
  NS_ENSURE_SUCCESS(rv, 0);
  return amount;
}
#endif

// The platform-specific process handle type taken by ResidentUnique() below.
typedef
#ifdef XP_WIN
    HANDLE
#elif XP_MACOSX
    mach_port_t
#elif XP_LINUX
    pid_t
#else
    int /* dummy type */
#endif
        ResidentUniqueArg;
2605 #if defined(XP_WIN) || defined(XP_MACOSX) || defined(XP_LINUX)
2607 /*static*/
2608 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg aProcess) {
2609 int64_t amount = 0;
2610 nsresult rv = ResidentUniqueDistinguishedAmount(&amount, aProcess);
2611 NS_ENSURE_SUCCESS(rv, 0);
2612 return amount;
2615 #else
2617 /*static*/
2618 int64_t nsMemoryReporterManager::ResidentUnique(ResidentUniqueArg) {
2619 # ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2620 int64_t amount = 0;
2621 nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
2622 NS_ENSURE_SUCCESS(rv, 0);
2623 return amount;
2624 # else
2625 return 0;
2626 # endif
2629 #endif // XP_{WIN, MACOSX, LINUX, *}
2631 #ifdef HAVE_JEMALLOC_STATS
2632 // static
2633 size_t nsMemoryReporterManager::HeapAllocated(const jemalloc_stats_t& aStats) {
2634 return aStats.allocated;
2636 #endif
2638 NS_IMETHODIMP
2639 nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount) {
2640 #ifdef HAVE_JEMALLOC_STATS
2641 jemalloc_stats_t stats;
2642 jemalloc_stats(&stats);
2643 *aAmount = HeapAllocated(stats);
2644 return NS_OK;
2645 #else
2646 *aAmount = 0;
2647 return NS_ERROR_NOT_AVAILABLE;
2648 #endif
2651 // This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2652 NS_IMETHODIMP
2653 nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount) {
2654 #ifdef HAVE_JEMALLOC_STATS
2655 jemalloc_stats_t stats;
2656 jemalloc_stats(&stats);
2657 *aAmount = HeapOverheadFraction(stats);
2658 return NS_OK;
2659 #else
2660 *aAmount = 0;
2661 return NS_ERROR_NOT_AVAILABLE;
2662 #endif
[[nodiscard]] static nsresult GetInfallibleAmount(InfallibleAmountFn aAmountFn,
                                                  int64_t* aAmount) {
  if (aAmountFn) {
    *aAmount = aAmountFn();
    return NS_OK;
  }
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsSystem,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeCompartmentsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeCompartmentsUser,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
                             aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount) {
  return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
}

NS_IMETHODIMP
nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount) {
#ifdef HAVE_PAGE_FAULT_REPORTERS
  return PageFaultsHardDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
NS_IMETHODIMP
nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas) {
  void* p = malloc(16);
  if (!p) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  size_t usable = moz_malloc_usable_size(p);
  free(p);
  *aHas = usable > 0;
  return NS_OK;
}
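// The probe above works by making a small live allocation and asking the
// allocator for its usable size: a supporting allocator reports a nonzero
// usable size for the 16-byte block, while moz_malloc_usable_size()
// (apparently) yields 0 when the query is unsupported.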
NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled) {
#ifdef MOZ_DMD
  *aIsEnabled = true;
#else
  *aIsEnabled = false;
#endif
  return NS_OK;
}

NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning) {
#ifdef MOZ_DMD
  *aIsRunning = dmd::IsRunning();
#else
  *aIsRunning = false;
#endif
  return NS_OK;
}
namespace {

/**
 * This runnable lets us implement
 * nsIMemoryReporterManager::MinimizeMemoryUsage(). We fire a heap-minimize
 * notification, spin the event loop, and repeat this process a few times.
 *
 * When this sequence finishes, we invoke the callback function passed to the
 * runnable's constructor.
 */
class MinimizeMemoryUsageRunnable : public Runnable {
 public:
  explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
      : mozilla::Runnable("MinimizeMemoryUsageRunnable"),
        mCallback(aCallback),
        mRemainingIters(sNumIters) {}

  NS_IMETHOD Run() override {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (!os) {
      return NS_ERROR_FAILURE;
    }

    if (mRemainingIters == 0) {
      os->NotifyObservers(nullptr, "after-minimize-memory-usage",
                          u"MinimizeMemoryUsageRunnable");
      if (mCallback) {
        mCallback->Run();
      }
      return NS_OK;
    }

    os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
    mRemainingIters--;
    NS_DispatchToMainThread(this);

    return NS_OK;
  }

 private:
  // Send sNumIters heap-minimize notifications, spinning the event
  // loop after each notification (see bug 610166 comment 12 for an
  // explanation), because one notification doesn't cut it.
  static const uint32_t sNumIters = 3;

  nsCOMPtr<nsIRunnable> mCallback;
  uint32_t mRemainingIters;
};

}  // namespace
NS_IMETHODIMP
nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback) {
  RefPtr<MinimizeMemoryUsageRunnable> runnable =
      new MinimizeMemoryUsageRunnable(aCallback);

  return NS_DispatchToMainThread(runnable);
}
NS_IMETHODIMP
nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
                                   int64_t* aJSObjectsSize,
                                   int64_t* aJSStringsSize,
                                   int64_t* aJSOtherSize, int64_t* aDomSize,
                                   int64_t* aStyleSize, int64_t* aOtherSize,
                                   int64_t* aTotalSize, double* aJSMilliseconds,
                                   double* aNonJSMilliseconds) {
  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
  auto* piWindow = nsPIDOMWindowOuter::From(aTopWindow);
  if (NS_WARN_IF(!global) || NS_WARN_IF(!piWindow)) {
    return NS_ERROR_FAILURE;
  }

  TimeStamp t1 = TimeStamp::Now();

  // Measure JS memory consumption (and possibly some non-JS consumption, via
  // |jsPrivateSize|).
  size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
  nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(), &jsObjectsSize,
                                  &jsStringsSize, &jsPrivateSize, &jsOtherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp t2 = TimeStamp::Now();

  // Measure non-JS memory consumption.
  size_t domSize, styleSize, otherSize;
  rv = mSizeOfTabFns.mNonJS(piWindow, &domSize, &styleSize, &otherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp t3 = TimeStamp::Now();

  *aTotalSize = 0;
#define DO(aN, n)       \
  {                     \
    *aN = (n);          \
    *aTotalSize += (n); \
  }
  DO(aJSObjectsSize, jsObjectsSize);
  DO(aJSStringsSize, jsStringsSize);
  DO(aJSOtherSize, jsOtherSize);
  // |jsPrivateSize| is counted as part of the DOM total.
  DO(aDomSize, jsPrivateSize + domSize);
  DO(aStyleSize, styleSize);
  DO(aOtherSize, otherSize);
#undef DO

  *aJSMilliseconds = (t2 - t1).ToMilliseconds();
  *aNonJSMilliseconds = (t3 - t2).ToMilliseconds();

  return NS_OK;
}
namespace mozilla {

#define GET_MEMORY_REPORTER_MANAGER(mgr)      \
  RefPtr<nsMemoryReporterManager> mgr =       \
      nsMemoryReporterManager::GetOrCreate(); \
  if (!mgr) {                                 \
    return NS_ERROR_FAILURE;                  \
  }
nsresult RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongReporter(reporter);
}

nsresult RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongAsyncReporter(reporter);
}

nsresult RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakReporter(aReporter);
}

nsresult RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakAsyncReporter(aReporter);
}

nsresult UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterStrongReporter(aReporter);
}

nsresult UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter) {
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterWeakReporter(aReporter);
}
// Macro for generating functions that register distinguished amount functions
// with the memory reporter manager.
#define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name)                   \
  nsresult Register##name##DistinguishedAmount(kind##AmountFn aAmountFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                       \
    mgr->mAmountFns.m##name = aAmountFn;                                   \
    return NS_OK;                                                          \
  }

// Macro for generating functions that unregister distinguished amount
// functions with the memory reporter manager.
#define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name) \
  nsresult Unregister##name##DistinguishedAmount() { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                 \
    mgr->mAmountFns.m##name = nullptr;               \
    return NS_OK;                                    \
  }

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
                                     JSMainRuntimeCompartmentsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeCompartmentsUser)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)

#undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
#undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
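// As an illustration, DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible,
// GhostWindows) above expands to:
//
//   nsresult RegisterGhostWindowsDistinguishedAmount(
//       InfallibleAmountFn aAmountFn) {
//     GET_MEMORY_REPORTER_MANAGER(mgr)
//     mgr->mAmountFns.mGhostWindows = aAmountFn;
//     return NS_OK;
//   }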
#define DEFINE_REGISTER_SIZE_OF_TAB(name)                              \
  nsresult Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn) { \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                   \
    mgr->mSizeOfTabFns.m##name = aSizeOfTabFn;                         \
    return NS_OK;                                                      \
  }

DEFINE_REGISTER_SIZE_OF_TAB(JS);
DEFINE_REGISTER_SIZE_OF_TAB(NonJS);

#undef DEFINE_REGISTER_SIZE_OF_TAB

#undef GET_MEMORY_REPORTER_MANAGER

}  // namespace mozilla