/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsMemoryReporterManager.h"
#include "nsAtomTable.h"
#include "nsCOMPtr.h"
#include "nsCOMArray.h"
#include "nsPrintfCString.h"
#include "nsProxyRelease.h"
#include "nsServiceManagerUtils.h"
#include "nsITimer.h"
#include "nsThreadUtils.h"
#include "nsPIDOMWindow.h"
#include "nsIObserverService.h"
#include "nsIOService.h"
#include "nsIGlobalObject.h"
#include "nsIXPConnect.h"
#ifdef MOZ_GECKO_PROFILER
# include "GeckoProfilerReporter.h"
#endif
#if defined(XP_UNIX) || defined(MOZ_DMD)
# include "nsMemoryInfoDumper.h"
#endif
#include "nsNetCID.h"
#include "nsThread.h"
#include "VRProcessManager.h"
#include "mozilla/Attributes.h"
#include "mozilla/MemoryReportingProcess.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Preferences.h"
#include "mozilla/RDDProcessManager.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Services.h"
#include "mozilla/Telemetry.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/dom/MemoryReportTypes.h"
#include "mozilla/dom/ContentParent.h"
#include "mozilla/gfx/GPUProcessManager.h"
#include "mozilla/ipc/FileDescriptorUtils.h"
#ifdef XP_WIN
# include "mozilla/MemoryInfo.h"
# include <process.h>
# ifndef getpid
# define getpid _getpid
# endif
#else
# include <unistd.h>
#endif
using namespace mozilla;
using namespace dom;
#if defined(MOZ_MEMORY)
# define HAVE_JEMALLOC_STATS 1
# include "mozmemory.h"
#endif // MOZ_MEMORY
#if defined(XP_LINUX)
# include "mozilla/MemoryMapping.h"
# include <malloc.h>
# include <string.h>
# include <stdlib.h>
[[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
// There are more than two fields, but we're only interested in the first
// two.
static const int MAX_FIELD = 2;
size_t fields[MAX_FIELD];
MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
FILE* f = fopen("/proc/self/statm", "r");
if (f) {
int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
fclose(f);
if (nread == MAX_FIELD) {
*aN = fields[aField] * getpagesize();
return NS_OK;
}
}
return NS_ERROR_FAILURE;
}
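// Illustrative example (invented numbers): if /proc/self/statm begins
// "152679 38243 ...", then with a 4 KiB page size field 0 yields a vsize of
// 152679 * 4096 ~= 625 MB and field 1 yields a resident size of
// 38243 * 4096 ~= 157 MB.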
[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) {
// You might be tempted to calculate USS by subtracting the "shared" value
// from the "resident" value in /proc/<pid>/statm. But at least on Linux,
// statm's "shared" value actually counts pages backed by files, which has
// little to do with whether the pages are actually shared. /proc/self/smaps
// on the other hand appears to give us the correct information.
nsTArray<MemoryMapping> mappings(1024);
MOZ_TRY(GetMemoryMappings(mappings, aPid));
int64_t amount = 0;
for (auto& mapping : mappings) {
amount += mapping.Private_Clean();
amount += mapping.Private_Dirty();
}
*aN = amount;
return NS_OK;
}
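// For illustration, each mapping in /proc/<pid>/smaps carries fields like:
//
//   Private_Clean:      512 kB
//   Private_Dirty:     2048 kB
//   Shared_Clean:      1024 kB
//
// (values invented for the example). The loop above sums only the two
// Private_* fields across all mappings, so this hypothetical mapping would
// contribute 2560 kB to the USS.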
# define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
return GetProcSelfStatmField(0, aN);
}
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
return GetProcSelfStatmField(1, aN);
}
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmount(aN);
}
# define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
int64_t* aN, pid_t aPid = 0) {
return GetProcSelfSmapsPrivate(aN, aPid);
}
# ifdef HAVE_MALLINFO
# define HAVE_SYSTEM_HEAP_REPORTER 1
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
struct mallinfo info = mallinfo();
// The documentation in the glibc man page makes it sound like |uordblks|
// would suffice, but that only gets the small allocations that are put in
// the brk heap. We need |hblkhd| as well to get the larger allocations
// that are mmapped.
//
// The fields in |struct mallinfo| are all |int|, <sigh>, so it is
// unreliable if memory usage gets high. However, the system heap size on
// Linux should usually be zero (so long as jemalloc is enabled) so that
// shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
// adding them to provide a small amount of extra overflow protection.
*aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
return NS_OK;
}
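// To make the overflow hazard above concrete: a hypothetical process with
// ~3 GiB of mmapped system-heap allocations would already have wrapped the
// 32-bit |hblkhd| field, which no cast can undo. The |size_t| casts only
// ensure that the addition of the two fields itself cannot overflow.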
# endif
#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
defined(__OpenBSD__) || defined(__FreeBSD_kernel__)
# include <sys/param.h>
# include <sys/sysctl.h>
# if defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__)
# include <sys/user.h>
# endif
# include <unistd.h>
# if defined(__NetBSD__)
# undef KERN_PROC
# define KERN_PROC KERN_PROC2
# define KINFO_PROC struct kinfo_proc2
# else
# define KINFO_PROC struct kinfo_proc
# endif
# if defined(__DragonFly__)
# define KP_SIZE(kp) (kp.kp_vm_map_size)
# define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# define KP_SIZE(kp) (kp.ki_size)
# define KP_RSS(kp) (kp.ki_rssize * getpagesize())
# elif defined(__NetBSD__)
# define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
# define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
# elif defined(__OpenBSD__)
# define KP_SIZE(kp) \
((kp.p_vm_dsize + kp.p_vm_ssize + kp.p_vm_tsize) * getpagesize())
# define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
# endif
[[nodiscard]] static nsresult GetKinfoProcSelf(KINFO_PROC* aProc) {
# if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
static LazyLogModule sPledgeLog("SandboxPledge");
MOZ_LOG(sPledgeLog, LogLevel::Debug,
("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
return NS_ERROR_FAILURE;
# endif
int mib[] = {
CTL_KERN,
KERN_PROC,
KERN_PROC_PID,
getpid(),
# if defined(__NetBSD__) || defined(__OpenBSD__)
sizeof(KINFO_PROC),
1,
# endif
};
u_int miblen = sizeof(mib) / sizeof(mib[0]);
size_t size = sizeof(KINFO_PROC);
if (sysctl(mib, miblen, aProc, &size, nullptr, 0)) {
return NS_ERROR_FAILURE;
}
return NS_OK;
}
# define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
KINFO_PROC proc;
nsresult rv = GetKinfoProcSelf(&proc);
if (NS_SUCCEEDED(rv)) {
*aN = KP_SIZE(proc);
}
return rv;
}
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
KINFO_PROC proc;
nsresult rv = GetKinfoProcSelf(&proc);
if (NS_SUCCEEDED(rv)) {
*aN = KP_RSS(proc);
}
return rv;
}
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmount(aN);
}
# ifdef __FreeBSD__
# include <libutil.h>
# include <algorithm>
[[nodiscard]] static nsresult GetKinfoVmentrySelf(int64_t* aPrss,
uint64_t* aMaxreg) {
int cnt;
struct kinfo_vmentry* vmmap;
struct kinfo_vmentry* kve;
if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
return NS_ERROR_FAILURE;
}
if (aPrss) {
*aPrss = 0;
}
if (aMaxreg) {
*aMaxreg = 0;
}
for (int i = 0; i < cnt; i++) {
kve = &vmmap[i];
if (aPrss) {
*aPrss += kve->kve_private_resident;
}
if (aMaxreg) {
*aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
}
}
free(vmmap);
return NS_OK;
}
# define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
int64_t priv;
nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
NS_ENSURE_SUCCESS(rv, rv);
*aN = priv * getpagesize();
return NS_OK;
}
# define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
int64_t* aN) {
uint64_t biggestRegion;
nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
if (NS_SUCCEEDED(rv)) {
*aN = biggestRegion;
}
// Propagate failure so callers don't read an uninitialized value.
return rv;
}
# endif // FreeBSD
#elif defined(SOLARIS)
# include <procfs.h>
# include <fcntl.h>
# include <unistd.h>
static void XMappingIter(int64_t& aVsize, int64_t& aResident,
int64_t& aShared) {
aVsize = -1;
aResident = -1;
aShared = -1;
int mapfd = open("/proc/self/xmap", O_RDONLY);
struct stat st;
prxmap_t* prmapp = nullptr;
if (mapfd >= 0) {
if (!fstat(mapfd, &st)) {
int nmap = st.st_size / sizeof(prxmap_t);
while (1) {
// stat(2) on /proc/<pid>/xmap returned an incorrect value prior to the
// release of Solaris 11, so as a workaround we keep doubling the buffer
// until the pread() below fits.
nmap *= 2;
prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
if (!prmapp) {
// out of memory
break;
}
int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
if (n < 0) {
break;
}
if (nmap >= n / sizeof(prxmap_t)) {
aVsize = 0;
aResident = 0;
aShared = 0;
for (int i = 0; i < n / sizeof(prxmap_t); i++) {
aVsize += prmapp[i].pr_size;
aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
if (prmapp[i].pr_mflags & MA_SHARED) {
aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
}
}
break;
}
free(prmapp);
}
free(prmapp);
}
close(mapfd);
}
}
# define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
int64_t vsize, resident, shared;
XMappingIter(vsize, resident, shared);
if (vsize == -1) {
return NS_ERROR_FAILURE;
}
*aN = vsize;
return NS_OK;
}
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
int64_t vsize, resident, shared;
XMappingIter(vsize, resident, shared);
if (resident == -1) {
return NS_ERROR_FAILURE;
}
*aN = resident;
return NS_OK;
}
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmount(aN);
}
# define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(int64_t* aN) {
int64_t vsize, resident, shared;
XMappingIter(vsize, resident, shared);
if (resident == -1) {
return NS_ERROR_FAILURE;
}
*aN = resident - shared;
return NS_OK;
}
#elif defined(XP_MACOSX)
# include <mach/mach_init.h>
# include <mach/mach_vm.h>
# include <mach/shared_region.h>
# include <mach/task.h>
# include <sys/sysctl.h>
[[nodiscard]] static bool GetTaskBasicInfo(struct task_basic_info* aTi) {
mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
kern_return_t kr =
task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)aTi, &count);
return kr == KERN_SUCCESS;
}
// The VSIZE figure on Mac includes huge amounts of shared memory and is always
// absurdly high, e.g. 2GB+ even at start-up. But both 'top' and 'ps' report
// it, so we might as well too.
# define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
task_basic_info ti;
if (!GetTaskBasicInfo(&ti)) {
return NS_ERROR_FAILURE;
}
*aN = ti.virtual_size;
return NS_OK;
}
// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
// pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
// an accurate result. The OS will take away MADV_FREE'd pages when there's
// memory pressure, so ideally, they shouldn't count against our RSS.
//
// Purging these pages can take a long time for some users (see bug 789975),
// so we provide the option to get the RSS without purging first.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN,
bool aDoPurge) {
# ifdef HAVE_JEMALLOC_STATS
if (aDoPurge) {
Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
jemalloc_purge_freed_pages();
}
# endif
task_basic_info ti;
if (!GetTaskBasicInfo(&ti)) {
return NS_ERROR_FAILURE;
}
*aN = ti.resident_size;
return NS_OK;
}
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}
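// Illustrative (hypothetical) guidance for choosing between the two: a
// probe that samples RSS every few seconds would want
// ResidentFastDistinguishedAmount() to avoid repeated purge pauses, while a
// one-off about:memory report can afford the purge in
// ResidentDistinguishedAmount() for a more accurate figure.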
# define HAVE_RESIDENT_UNIQUE_REPORTER 1
static bool InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType) {
mach_vm_address_t base;
mach_vm_address_t size;
switch (aType) {
case CPU_TYPE_ARM:
base = SHARED_REGION_BASE_ARM;
size = SHARED_REGION_SIZE_ARM;
break;
case CPU_TYPE_I386:
base = SHARED_REGION_BASE_I386;
size = SHARED_REGION_SIZE_I386;
break;
case CPU_TYPE_X86_64:
base = SHARED_REGION_BASE_X86_64;
size = SHARED_REGION_SIZE_X86_64;
break;
default:
return false;
}
return base <= aAddr && aAddr < (base + size);
}
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
int64_t* aN, mach_port_t aPort = 0) {
if (!aN) {
return NS_ERROR_FAILURE;
}
cpu_type_t cpu_type;
size_t len = sizeof(cpu_type);
if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
return NS_ERROR_FAILURE;
}
// Roughly based on libtop_update_vm_regions in the Darwin 'top' sources.
size_t privatePages = 0;
mach_vm_size_t size = 0;
for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS;; addr += size) {
vm_region_top_info_data_t info;
mach_msg_type_number_t infoCount = VM_REGION_TOP_INFO_COUNT;
mach_port_t objectName;
kern_return_t kr = mach_vm_region(
aPort ? aPort : mach_task_self(), &addr, &size, VM_REGION_TOP_INFO,
reinterpret_cast<vm_region_info_t>(&info), &infoCount, &objectName);
if (kr == KERN_INVALID_ADDRESS) {
// Done iterating VM regions.
break;
} else if (kr != KERN_SUCCESS) {
return NS_ERROR_FAILURE;
}
if (InSharedRegion(addr, cpu_type) && info.share_mode != SM_PRIVATE) {
continue;
}
switch (info.share_mode) {
case SM_LARGE_PAGE:
// NB: Large pages are not shareable and always resident.
case SM_PRIVATE:
privatePages += info.private_pages_resident;
privatePages += info.shared_pages_resident;
break;
case SM_COW:
privatePages += info.private_pages_resident;
if (info.ref_count == 1) {
// Treat copy-on-write pages as private if they only have one
// reference.
privatePages += info.shared_pages_resident;
}
break;
case SM_SHARED:
default:
break;
}
}
vm_size_t pageSize;
if (host_page_size(aPort ? aPort : mach_task_self(), &pageSize) !=
KERN_SUCCESS) {
pageSize = PAGE_SIZE;
}
*aN = privatePages * pageSize;
return NS_OK;
}
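// Worked example of the SM_COW case above: a copy-on-write page still
// referenced by two tasks (ref_count == 2) is counted only via
// private_pages_resident, i.e. only if we have dirtied our copy; once the
// other task goes away (ref_count == 1), the untouched pages counted in
// shared_pages_resident are treated as unique to us as well.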
#elif defined(XP_WIN)
# include <windows.h>
# include <psapi.h>
# include <algorithm>
# define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
MEMORYSTATUSEX s;
s.dwLength = sizeof(s);
if (!GlobalMemoryStatusEx(&s)) {
return NS_ERROR_FAILURE;
}
*aN = s.ullTotalVirtual - s.ullAvailVirtual;
return NS_OK;
}
[[nodiscard]] static nsresult ResidentDistinguishedAmount(int64_t* aN) {
PROCESS_MEMORY_COUNTERS pmc;
pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);
if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
return NS_ERROR_FAILURE;
}
*aN = pmc.WorkingSetSize;
return NS_OK;
}
[[nodiscard]] static nsresult ResidentFastDistinguishedAmount(int64_t* aN) {
return ResidentDistinguishedAmount(aN);
}
# define HAVE_RESIDENT_UNIQUE_REPORTER 1
[[nodiscard]] static nsresult ResidentUniqueDistinguishedAmount(
int64_t* aN, HANDLE aProcess = nullptr) {
// Determine how many entries we need.
PSAPI_WORKING_SET_INFORMATION tmp;
DWORD tmpSize = sizeof(tmp);
memset(&tmp, 0, tmpSize);
HANDLE proc = aProcess ? aProcess : GetCurrentProcess();
QueryWorkingSet(proc, &tmp, tmpSize);
// Fudge the size in case new entries are added between calls.
size_t entries = tmp.NumberOfEntries * 2;
if (!entries) {
return NS_ERROR_FAILURE;
}
DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));
if (!infoArray) {
return NS_ERROR_FAILURE;
}
if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
return NS_ERROR_FAILURE;
}
entries = static_cast<size_t>(infoArray->NumberOfEntries);
size_t privatePages = 0;
for (size_t i = 0; i < entries; i++) {
// Count shared pages that only one process is using as private.
if (!infoArray->WorkingSetInfo[i].Shared ||
infoArray->WorkingSetInfo[i].ShareCount <= 1) {
privatePages++;
}
}
SYSTEM_INFO si;
GetSystemInfo(&si);
*aN = privatePages * si.dwPageSize;
return NS_OK;
}
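// The two QueryWorkingSet() calls above are the usual Win32 probe-then-fill
// pattern. For example (hypothetical numbers), if the first call reports
// NumberOfEntries == 10000, we allocate the header plus room for 20000
// PSAPI_WORKING_SET_BLOCK entries, so moderate working-set growth between
// the two calls cannot truncate the second snapshot.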
# define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
[[nodiscard]] static nsresult VsizeMaxContiguousDistinguishedAmount(
int64_t* aN) {
SIZE_T biggestRegion = 0;
MEMORY_BASIC_INFORMATION vmemInfo = {0};
for (size_t currentAddress = 0;;) {
if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
// Something went wrong, just return whatever we've got already.
break;
}
if (vmemInfo.State == MEM_FREE) {
biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
}
SIZE_T lastAddress = currentAddress;
currentAddress += vmemInfo.RegionSize;
// If we overflow, we've examined all of the address space.
if (currentAddress < lastAddress) {
break;
}
}
*aN = biggestRegion;
return NS_OK;
}
# define HAVE_PRIVATE_REPORTER 1
[[nodiscard]] static nsresult PrivateDistinguishedAmount(int64_t* aN) {
PROCESS_MEMORY_COUNTERS_EX pmcex;
pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);
if (!GetProcessMemoryInfo(GetCurrentProcess(),
(PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) {
return NS_ERROR_FAILURE;
}
*aN = pmcex.PrivateUsage;
return NS_OK;
}
# define HAVE_SYSTEM_HEAP_REPORTER 1
// Windows can have multiple separate heaps, but we should not touch non-default
// heaps because they may be destroyed at any time while we hold a handle. So we
// count only the default heap.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
HANDLE heap = GetProcessHeap();
NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);
int64_t heapSize = 0;
PROCESS_HEAP_ENTRY entry;
entry.lpData = nullptr;
while (HeapWalk(heap, &entry)) {
// We don't count entry.cbOverhead, because we just want to measure the
// space available to the program.
if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
heapSize += entry.cbData;
}
}
// Check this result only after unlocking the heap, so that we don't leave
// the heap locked if there was an error.
DWORD lastError = GetLastError();
// I have no idea how things would proceed if unlocking this heap failed...
NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);
NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);
*aSizeOut = heapSize;
return NS_OK;
}
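// Note on the accounting above: for a block obtained via, say,
// HeapAlloc(heap, 0, 100), HeapWalk() yields an entry with
// PROCESS_HEAP_ENTRY_BUSY set whose cbData is (roughly) the requested 100
// bytes, while the allocator's own per-block bookkeeping appears in
// cbOverhead, which we deliberately exclude.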
struct SegmentKind {
DWORD mState;
DWORD mType;
DWORD mProtect;
int mIsStack;
};
struct SegmentEntry : public PLDHashEntryHdr {
static PLDHashNumber HashKey(const void* aKey) {
auto kind = static_cast<const SegmentKind*>(aKey);
return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
kind->mIsStack);
}
static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey) {
auto kind = static_cast<const SegmentKind*>(aKey);
auto entry = static_cast<const SegmentEntry*>(aEntry);
return kind->mState == entry->mKind.mState &&
kind->mType == entry->mKind.mType &&
kind->mProtect == entry->mKind.mProtect &&
kind->mIsStack == entry->mKind.mIsStack;
}
static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey) {
auto kind = static_cast<const SegmentKind*>(aKey);
auto entry = static_cast<SegmentEntry*>(aEntry);
entry->mKind = *kind;
entry->mCount = 0;
entry->mSize = 0;
}
static const PLDHashTableOps Ops;
SegmentKind mKind; // The segment kind.
uint32_t mCount; // The number of segments of this kind.
size_t mSize; // The combined size of segments of this kind.
};
/* static */ const PLDHashTableOps SegmentEntry::Ops = {
SegmentEntry::HashKey, SegmentEntry::MatchEntry,
PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub,
SegmentEntry::InitEntry};
class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
~WindowsAddressSpaceReporter() {}
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
// First iterate over all the segments and record how many of each kind
// there were and their aggregate sizes. We use a hash table for this
// because there are a couple of dozen different kinds possible.
PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
MEMORY_BASIC_INFORMATION info = {0};
bool isPrevSegStackGuard = false;
for (size_t currentAddress = 0;;) {
if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
// Something went wrong, just return whatever we've got already.
break;
}
size_t size = info.RegionSize;
// Note that |type| and |protect| are ignored in some cases.
DWORD state = info.State;
DWORD type =
(state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
type == MEM_PRIVATE && protect == PAGE_READWRITE;
SegmentKind kind = {state, type, protect, isStack ? 1 : 0};
auto entry =
static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
if (entry) {
entry->mCount += 1;
entry->mSize += size;
}
isPrevSegStackGuard = info.State == MEM_COMMIT &&
info.Type == MEM_PRIVATE &&
info.Protect == (PAGE_READWRITE | PAGE_GUARD);
size_t lastAddress = currentAddress;
currentAddress += size;
// If we overflow, we've examined all of the address space.
if (currentAddress < lastAddress) {
break;
}
}
// Then iterate over the hash table and report the details for each segment
// kind.
for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
// For each range of pages, we consider one or more of its State, Type
// and Protect values. These are documented at
// (for State and Type) and
// (for Protect).
//
// Not all State values have accompanying Type and Protection values.
bool doType = false;
bool doProtect = false;
auto entry = static_cast<const SegmentEntry*>(iter.Get());
nsCString path("address-space");
switch (entry->mKind.mState) {
case MEM_FREE:
path.AppendLiteral("/free");
break;
case MEM_RESERVE:
path.AppendLiteral("/reserved");
doType = true;
break;
case MEM_COMMIT:
path.AppendLiteral("/commit");
doType = true;
doProtect = true;
break;
default:
// Should be impossible, but handle it just in case.
path.AppendLiteral("/???");
break;
}
if (doType) {
switch (entry->mKind.mType) {
case MEM_IMAGE:
path.AppendLiteral("/image");
break;
case MEM_MAPPED:
path.AppendLiteral("/mapped");
break;
case MEM_PRIVATE:
path.AppendLiteral("/private");
break;
default:
// Should be impossible, but handle it just in case.
path.AppendLiteral("/???");
break;
}
}
if (doProtect) {
DWORD protect = entry->mKind.mProtect;
// Basic attributes. Exactly one of these should be set.
if (protect & PAGE_EXECUTE) {
path.AppendLiteral("/execute");
}
if (protect & PAGE_EXECUTE_READ) {
path.AppendLiteral("/execute-read");
}
if (protect & PAGE_EXECUTE_READWRITE) {
path.AppendLiteral("/execute-readwrite");
}
if (protect & PAGE_EXECUTE_WRITECOPY) {
path.AppendLiteral("/execute-writecopy");
}
if (protect & PAGE_NOACCESS) {
path.AppendLiteral("/noaccess");
}
if (protect & PAGE_READONLY) {
path.AppendLiteral("/readonly");
}
if (protect & PAGE_READWRITE) {
path.AppendLiteral("/readwrite");
}
if (protect & PAGE_WRITECOPY) {
path.AppendLiteral("/writecopy");
}
// Modifiers. At most one of these should be set.
if (protect & PAGE_GUARD) {
path.AppendLiteral("+guard");
}
if (protect & PAGE_NOCACHE) {
path.AppendLiteral("+nocache");
}
if (protect & PAGE_WRITECOMBINE) {
path.AppendLiteral("+writecombine");
}
// Annotate likely stack segments, too.
if (entry->mKind.mIsStack) {
path.AppendLiteral("+stack");
}
}
// Append the segment count.
path.AppendPrintf("(segments=%u)", entry->mCount);
aHandleReport->Callback(""_ns, path, KIND_OTHER, UNITS_BYTES,
entry->mSize, "From MEMORY_BASIC_INFORMATION."_ns,
aData);
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)
#endif // XP_<PLATFORM>
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
~VsizeMaxContiguousReporter() {}
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount;
if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
MOZ_COLLECT_REPORT(
"vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
"Size of the maximum contiguous block of available virtual memory.");
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif
#ifdef HAVE_PRIVATE_REPORTER
class PrivateReporter final : public nsIMemoryReporter {
~PrivateReporter() {}
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount;
if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"private", KIND_OTHER, UNITS_BYTES, amount,
"Memory that cannot be shared with other processes, including memory that is "
"committed and marked MEM_PRIVATE, data that is not mapped, and executable "
"pages that have been written to.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
#endif
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
class VsizeReporter final : public nsIMemoryReporter {
~VsizeReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount;
if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"vsize", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process, including code and data segments, the heap, "
"thread stacks, memory explicitly mapped by the process via mmap and similar "
"operations, and memory shared with other processes. This is the vsize figure "
"as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
"processes share huge amounts of memory with one another. But even on other "
"operating systems, 'resident' is a much better measure of the memory "
"resources used by the process.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)
class ResidentReporter final : public nsIMemoryReporter {
~ResidentReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount;
if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"resident", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process that is present in physical memory, also known "
"as the resident set size (RSS). This is the best single figure to use when "
"considering the memory resources used by the process, but it depends both on "
"other processes being run and details of the OS kernel and so is best used "
"for comparing the memory usage of a single process at different points in "
"time.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
#endif // HAVE_VSIZE_AND_RESIDENT_REPORTERS
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
class ResidentUniqueReporter final : public nsIMemoryReporter {
~ResidentUniqueReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount = 0;
if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"resident-unique", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process that is present in physical memory and not "
"shared with any other processes. This is also known as the process's unique "
"set size (USS). This is the amount of RAM we'd expect to be freed if we "
"closed this process.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)
#endif // HAVE_RESIDENT_UNIQUE_REPORTER
#ifdef HAVE_SYSTEM_HEAP_REPORTER
class SystemHeapReporter final : public nsIMemoryReporter {
~SystemHeapReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount;
if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
"Memory used by the system allocator that is currently allocated to the "
"application. This is distinct from the jemalloc heap that Firefox uses for "
"most or all of its heap allocations. Ideally this number is zero, but "
"on some platforms we cannot force every heap allocation through jemalloc.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
#endif // HAVE_SYSTEM_HEAP_REPORTER
#ifdef XP_UNIX
# include <sys/resource.h>
# define HAVE_RESIDENT_PEAK_REPORTER 1
[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
struct rusage usage;
if (0 == getrusage(RUSAGE_SELF, &usage)) {
// The units for ru_maxrss:
// - Mac: bytes
// - Solaris: pages? But some sources say it actually always returns 0, so
//   check for that
// - Linux, {Net/Open/Free}BSD, DragonFly: KiB
# ifdef XP_MACOSX
*aN = usage.ru_maxrss;
# elif defined(SOLARIS)
*aN = usage.ru_maxrss * getpagesize();
# else
*aN = usage.ru_maxrss * 1024;
# endif
if (*aN > 0) {
return NS_OK;
}
}
return NS_ERROR_FAILURE;
}
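// Worked example: on Linux a getrusage() result of ru_maxrss == 204800
// means a peak RSS of 204800 KiB, so *aN = 204800 * 1024 = 209715200 bytes
// (200 MiB); on Mac the same peak would already be reported in bytes.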
class ResidentPeakReporter final : public nsIMemoryReporter {
~ResidentPeakReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount = 0;
if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
MOZ_COLLECT_REPORT(
"resident-peak", KIND_OTHER, UNITS_BYTES, amount,
"The peak 'resident' value for the lifetime of the process.");
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)
# define HAVE_PAGE_FAULT_REPORTERS 1
class PageFaultsSoftReporter final : public nsIMemoryReporter {
~PageFaultsSoftReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
struct rusage usage;
int err = getrusage(RUSAGE_SELF, &usage);
if (err == 0) {
int64_t amount = usage.ru_minflt;
// clang-format off
MOZ_COLLECT_REPORT(
"page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of soft page faults (also known as 'minor page faults') that "
"have occurred since the process started. A soft page fault occurs when the "
"process tries to access a page which is present in physical memory but is "
"not mapped into the process's address space. For instance, a process might "
"observe soft page faults when it loads a shared library which is already "
"present in physical memory. A process may experience many thousands of soft "
"page faults even when the machine has plenty of available physical memory, "
"and because the OS services a soft page fault without accessing the disk, "
"they impact performance much less than hard page faults.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)
[[nodiscard]] static nsresult PageFaultsHardDistinguishedAmount(
int64_t* aAmount) {
struct rusage usage;
int err = getrusage(RUSAGE_SELF, &usage);
if (err != 0) {
return NS_ERROR_FAILURE;
}
*aAmount = usage.ru_majflt;
return NS_OK;
}
class PageFaultsHardReporter final : public nsIMemoryReporter {
~PageFaultsHardReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
int64_t amount = 0;
if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
// clang-format off
MOZ_COLLECT_REPORT(
"page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of hard page faults (also known as 'major page faults') that have "
"occurred since the process started. A hard page fault occurs when a process "
"tries to access a page which is not present in physical memory. The "
"operating system must access the disk in order to fulfill a hard page fault. "
"When memory is plentiful, you should see very few hard page faults. But if "
"the process tries to use more memory than your machine has available, you "
"may see many thousands of hard page faults. Because accessing the disk is up "
"to a million times slower than accessing RAM, the program may run very "
"slowly when it is experiencing more than 100 or so hard page faults a "
"second.");
// clang-format on
}
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
#endif // XP_UNIX
/**
** memory reporter implementation for jemalloc and OSX malloc,
** to obtain info on total memory in use (that we know about,
** at least -- on OSX, there are sometimes other zones in use).
**/
#ifdef HAVE_JEMALLOC_STATS
static size_t HeapOverhead(jemalloc_stats_t* aStats) {
return aStats->waste + aStats->bookkeeping + aStats->page_cache +
aStats->bin_unused;
}
// This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
// 100x for the percentage.
static int64_t HeapOverheadFraction(jemalloc_stats_t* aStats) {
size_t heapOverhead = HeapOverhead(aStats);
size_t heapCommitted = aStats->allocated + heapOverhead;
return int64_t(10000 * (heapOverhead / (double)heapCommitted));
}
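// Worked example (illustrative numbers): with 25 MiB of overhead and 75 MiB
// allocated, heapCommitted is 100 MiB and the fraction is 0.25, so this
// returns 2500; a consumer of UNITS_PERCENTAGE values divides by 100 to
// display "25%".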
class JemallocHeapReporter final : public nsIMemoryReporter {
~JemallocHeapReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
jemalloc_stats_t stats;
jemalloc_bin_stats_t bin_stats[JEMALLOC_MAX_STATS_BINS];
jemalloc_stats(&stats, bin_stats);
// clang-format off
MOZ_COLLECT_REPORT(
"heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"Memory mapped by the heap allocator that is currently allocated to the "
"application. This may exceed the amount of memory requested by the "
"application because the allocator regularly rounds up request sizes. (The "
"exact amount requested is not recorded.)");
MOZ_COLLECT_REPORT(
"heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"The same as 'heap-committed/allocated'.");
// We mark this and the other heap-overhead reporters as KIND_NONHEAP
// because KIND_HEAP memory means "counted in heap-allocated", which
// this is not.
for (auto& bin : bin_stats) {
if (!bin.size) {
continue;
}
nsPrintfCString path("explicit/heap-overhead/bin-unused/bin-%zu",
bin.size);
aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
bin.bytes_unused,
nsLiteralCString(
"Unused bytes in all runs of all bins for this size class"),
aData);
}
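// For example, a bin whose size class is 16 bytes is reported under the
// path "explicit/heap-overhead/bin-unused/bin-16".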
if (stats.waste > 0) {
MOZ_COLLECT_REPORT(
"explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
stats.waste,
"Committed bytes which do not correspond to an active allocation and which the "
"allocator is not intentionally keeping alive (i.e., not "
"'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
}
MOZ_COLLECT_REPORT(
"explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
stats.bookkeeping,
"Committed bytes which the heap allocator uses for internal data structures.");
MOZ_COLLECT_REPORT(
"explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
stats.page_cache,
"Memory which the allocator could return to the operating system, but hasn't. "
"The allocator keeps this memory around as an optimization, so it doesn't "
"have to ask the OS the next time it needs to fulfill a request. This value "
"is typically not larger than a few megabytes.");
MOZ_COLLECT_REPORT(
"heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
HeapOverhead(&stats),
"The sum of 'explicit/heap-overhead/*'.");
MOZ_COLLECT_REPORT(
"heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
"Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
"neither in physical memory nor paged to disk.");
MOZ_COLLECT_REPORT(
"heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
"Size of chunks.");
// clang-format on
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)
#endif // HAVE_JEMALLOC_STATS
// Why is this here? At first glance, you'd think it could be defined and
// registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
// However, the obvious time to register it is when the table is initialized,
// and that happens before XPCOM components are initialized, which means the
// RegisterStrongMemoryReporter call fails. So instead we do it here.
class AtomTablesReporter final : public nsIMemoryReporter {
MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
~AtomTablesReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
AtomsSizes sizes;
NS_AddSizeOfAtoms(MallocSizeOf, sizes);
MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
sizes.mTable, "Memory used by the atom table.");
MOZ_COLLECT_REPORT(
"explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
sizes.mDynamicAtoms,
"Memory used by dynamic atom objects and chars (which are stored "
"at the end of each atom object).");
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
class ThreadsReporter final : public nsIMemoryReporter {
MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
~ThreadsReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
#ifdef XP_LINUX
nsTArray<MemoryMapping> mappings(1024);
MOZ_TRY(GetMemoryMappings(mappings));
#endif
// Enumerating over active threads requires holding a lock, so we collect
// info on all threads, and then call our reporter callbacks after releasing
// the lock.
struct ThreadData {
nsCString mName;
uint32_t mThreadId;
size_t mPrivateSize;
};
AutoTArray<ThreadData, 32> threads;
size_t eventQueueSizes = 0;
size_t wrapperSizes = 0;
size_t threadCount = 0;
for (auto* thread : nsThread::Enumerate()) {
threadCount++;
eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);
if (!thread->StackBase()) {
continue;
}
#if defined(XP_LINUX)
int idx = mappings.BinaryIndexOf(thread->StackBase());
if (idx < 0) {
continue;
}
// Referenced() is the combined size of all pages in the region which have
// ever been touched, and are therefore consuming memory. For stack
// regions, these pages are guaranteed to be un-shared unless we fork
// after creating threads (which we don't).
size_t privateSize = mappings[idx].Referenced();
// On Linux, we have to be very careful matching memory regions to thread
// stacks.
//
// To begin with, the kernel only reports VM stats for regions of all
// adjacent pages with the same flags, protection, and backing file.
// There's no way to get finer-grained usage information for a subset of
// those pages.
//
// Stack segments always have a guard page at the bottom of the stack
// (assuming we only support stacks that grow down), so there's no danger
// of them being merged with other stack regions. At the top, there's no
// protection page, and no way to allocate one without using pthreads
// directly and allocating our own stacks. So we get around the problem by
// adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
// don't expect to be set on any heap regions. But this is not fool-proof.
//
// A second kink is that different C libraries (and different versions
// thereof) report stack base locations and sizes differently with regard
// to the guard page. For the libraries that include the guard page in the
// stack size base pointer, we need to adjust those values to compensate.
// But it's possible that our logic will get out of sync with library
// changes, or someone will compile with an unexpected library.
//
// The upshot of all of this is that there may be configurations that our
// special cases don't cover. And if there are, we want to know about it.
// So assert that total size of the memory region we're reporting actually
// matches the allocated size of the thread stack.
# ifndef ANDROID
MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
"Mapping region size doesn't match stack allocation size");
# endif
#elif defined(XP_WIN)
auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
size_t privateSize = memInfo.Committed();
#else
size_t privateSize = thread->StackSize();
MOZ_ASSERT_UNREACHABLE(
"Shouldn't have stack base pointer on this "
"platform");
#endif
threads.AppendElement(ThreadData{
nsCString(PR_GetThreadName(thread->GetPRThread())),
thread->ThreadId(),
// On Linux, it's possible (but unlikely) that our stack region will
// have been merged with adjacent heap regions, in which case we'll
// get combined size information for both. So we take the minimum of
// the reported private size and the requested stack size to avoid the
// possibility of majorly over-reporting in that case.
std::min(privateSize, thread->StackSize()),
});
}
for (auto& thread : threads) {
nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
thread.mName.get(), thread.mThreadId);
aHandleReport->Callback(
""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
nsLiteralCString("The sizes of thread stacks which have been "
"committed to memory."),
aData);
}
MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
UNITS_BYTES, eventQueueSizes,
"The sizes of nsThread event queues and observers.");
MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
UNITS_BYTES, wrapperSizes,
"The sizes of nsThread/PRThread wrappers.");
#if defined(XP_WIN)
// Each thread on Windows has a fixed kernel overhead: 12 KiB on 32-bit
// Windows, and 24 KiB on 64-bit.
constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
// On Linux, kernel stacks are usually 8K. However, on x86, they are
// allocated virtually, and start out at 4K. They may grow to 8K, but we
// have no way of knowing which ones do, so all we can do is guess.
# if defined(__x86_64__) || defined(__i386__)
constexpr size_t kKernelSize = 4 * 1024;
# else
constexpr size_t kKernelSize = 8 * 1024;
# endif
#elif defined(XP_MACOSX)
// On Darwin, kernel stacks are 16K.
constexpr size_t kKernelSize = 16 * 1024;
#else
// Elsewhere, just assume that kernel stacks require at least 8K.
constexpr size_t kKernelSize = 8 * 1024;
#endif
MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
UNITS_BYTES, threadCount * kKernelSize,
"The total kernel overhead for all active threads.");
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)
#ifdef DEBUG
// Ideally, this would be implemented in BlockingResourceBase.cpp.
// However, this ends up breaking the linking step of various unit tests due
// to adding a new dependency to libdmd for a commonly used feature (mutexes)
// in DMD builds. So instead we do it here.
class DeadlockDetectorReporter final : public nsIMemoryReporter {
MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
~DeadlockDetectorReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
MOZ_COLLECT_REPORT(
"explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
"Memory used by the deadlock detector.");
return NS_OK;
}
};
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)
#endif
#ifdef MOZ_DMD
namespace mozilla {
namespace dmd {
class DMDReporter final : public nsIMemoryReporter {
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
dmd::Sizes sizes;
dmd::SizeOf(&sizes);
MOZ_COLLECT_REPORT(
"explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
sizes.mStackTracesUsed,
"Memory used by stack traces which correspond to at least "
"one heap block DMD is tracking.");
MOZ_COLLECT_REPORT(
"explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
sizes.mStackTracesUnused,
"Memory used by stack traces which don't correspond to any heap "
"blocks DMD is currently tracking.");
MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
UNITS_BYTES, sizes.mStackTraceTable,
"Memory used by DMD's stack trace table.");
MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
sizes.mLiveBlockTable,
"Memory used by DMD's live block table.");
MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
sizes.mDeadBlockTable,
"Memory used by DMD's dead block list.");
return NS_OK;
}
private:
~DMDReporter() = default;
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)
} // namespace dmd
} // namespace mozilla
#endif // MOZ_DMD
/**
** nsMemoryReporterManager implementation
**/
NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager,
nsIMemoryReporter)
NS_IMETHODIMP
nsMemoryReporterManager::Init() {
if (!NS_IsMainThread()) {
MOZ_CRASH();
}
// Under normal circumstances this function is only called once. However,
// we've (infrequently) seen memory report dumps in crash reports that
// suggest that this function is sometimes called multiple times. That in
// turn means that multiple reporters of each kind are registered, which
// leads to duplicated reports of individual measurements such as "resident",
// "vsize", etc.
//
// It's unclear how these multiple calls can occur. The only plausible theory
// so far is badly-written extensions, because this function is callable from
// JS code via nsIMemoryReporter.idl.
//
// Whatever the cause, it's a bad thing. So we protect against it with the
// following check.
static bool isInited = false;
if (isInited) {
NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
return NS_OK;
}
isInited = true;
#ifdef HAVE_JEMALLOC_STATS
RegisterStrongReporter(new JemallocHeapReporter());
#endif
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
RegisterStrongReporter(new VsizeReporter());
RegisterStrongReporter(new ResidentReporter());
#endif
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
RegisterStrongReporter(new VsizeMaxContiguousReporter());
#endif
#ifdef HAVE_RESIDENT_PEAK_REPORTER
RegisterStrongReporter(new ResidentPeakReporter());
#endif
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
RegisterStrongReporter(new ResidentUniqueReporter());
#endif
#ifdef HAVE_PAGE_FAULT_REPORTERS
RegisterStrongReporter(new PageFaultsSoftReporter());
RegisterStrongReporter(new PageFaultsHardReporter());
#endif
#ifdef HAVE_PRIVATE_REPORTER
RegisterStrongReporter(new PrivateReporter());
#endif
#ifdef HAVE_SYSTEM_HEAP_REPORTER
RegisterStrongReporter(new SystemHeapReporter());
#endif
RegisterStrongReporter(new AtomTablesReporter());
RegisterStrongReporter(new ThreadsReporter());
#ifdef DEBUG
RegisterStrongReporter(new DeadlockDetectorReporter());
#endif
#ifdef MOZ_GECKO_PROFILER
// We have to register this here rather than in profiler_init() because
// profiler_init() runs prior to nsMemoryReporterManager's creation.
RegisterStrongReporter(new