/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
# include <malloc.h> # include <string.h> # include <stdlib.h>
// Reads field |aField| (0-based) from /proc/self/statm and returns it in
// bytes via |aN| (statm reports values in pages, so we scale by the page
// size). Returns NS_ERROR_FAILURE if the file can't be opened or parsed.
[[nodiscard]] static nsresult GetProcSelfStatmField(int aField, int64_t* aN) {
  // There are more than two fields, but we're only interested in the first
  // two.
  static const int MAX_FIELD = 2;
  size_t fields[MAX_FIELD];
  MOZ_ASSERT(aField < MAX_FIELD, "bad field number");
  FILE* f = fopen("/proc/self/statm", "r");
  if (f) {
    int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
    fclose(f);
    // Only trust the values if both fields parsed successfully.
    if (nread == MAX_FIELD) {
      *aN = fields[aField] * getpagesize();
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
[[nodiscard]] static nsresult GetProcSelfSmapsPrivate(int64_t* aN, pid_t aPid) { // You might be tempted to calculate USS by subtracting the "shared" value // from the "resident" value in /proc/<pid>/statm. But at least on Linux, // statm's "shared" value actually counts pages backed by files, which has // little to do with whether the pages are actually shared. /proc/self/smaps // on the other hand appears to give us the correct information.
// The documentation in the glibc man page makes it sound like |uordblks| // would suffice, but that only gets the small allocations that are put in // the brk heap. We need |hblkhd| as well to get the larger allocations // that are mmapped. // // The fields in |struct mallinfo| are all |int|, <sigh>, so it is // unreliable if memory usage gets high. However, the system heap size on // Linux should usually be zero (so long as jemalloc is enabled) so that // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before // adding them to provide a small amount of extra overflow protection.
*aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks); return NS_OK;
} # endif
// The VSIZE figure on Mac includes huge amounts of shared memory and is always // absurdly high, eg. 2GB+ even at start-up. But both 'top' and 'ps' report // it, so we might as well too. # define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
// Returns the task's virtual address space size, as reported by the kernel's
// task_basic_info structure.
[[nodiscard]] static nsresult VsizeDistinguishedAmount(int64_t* aN) {
  task_basic_info info;
  if (GetTaskBasicInfo(&info)) {
    *aN = info.virtual_size;
    return NS_OK;
  }
  return NS_ERROR_FAILURE;
}
// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the // pages it has madvise(MADV_FREE)'d before we read our RSS in order to get // an accurate result. The OS will take away MADV_FREE'd pages when there's // memory pressure, so ideally, they shouldn't count against our RSS. // // Purging these pages can take a long time for some users (see bug 789975), // so we provide the option to get the RSS without purging first.
[[nodiscard]] static nsresult ResidentDistinguishedAmountHelper(int64_t* aN, bool aDoPurge) { # ifdef HAVE_JEMALLOC_STATS if (aDoPurge) {
Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
jemalloc_purge_freed_pages();
} # endif
if (InSharedRegion(addr, cpu_type) && topInfo.share_mode != SM_PRIVATE) { continue;
}
switch (topInfo.share_mode) { case SM_LARGE_PAGE: // NB: Large pages are not shareable and always resident. case SM_PRIVATE:
privatePages += topInfo.private_pages_resident;
privatePages += topInfo.shared_pages_resident; break; case SM_COW:
privatePages += topInfo.private_pages_resident; if (topInfo.ref_count == 1) { // Treat copy-on-write pages as private if they only have one // reference.
privatePages += topInfo.shared_pages_resident;
} break; case SM_SHARED: { // Using mprotect() or similar to protect a page in the middle of a // mapping can create aliased mappings. They look like shared mappings // to the VM_REGION_TOP_INFO interface, so re-check with // VM_REGION_EXTENDED_INFO.
// The phys_footprint value (introduced in 10.11) of the TASK_VM_INFO data // matches the value in the 'Memory' column of the Activity Monitor.
task_vm_info_data_t task_vm_info;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
kern_return_t kr = task_info(aPort ? aPort : mach_task_self(), TASK_VM_INFO,
(task_info_t)&task_vm_info, &count); if (kr != KERN_SUCCESS) { return NS_ERROR_FAILURE;
}
if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) { return NS_ERROR_FAILURE;
}
entries = static_cast<size_t>(infoArray->NumberOfEntries);
size_t privatePages = 0; for (size_t i = 0; i < entries; i++) { // Count shared pages that only one process is using as private. if (!infoArray->WorkingSetInfo[i].Shared ||
infoArray->WorkingSetInfo[i].ShareCount <= 1) {
privatePages++;
}
}
if (!GetProcessMemoryInfo(GetCurrentProcess(),
(PPROCESS_MEMORY_COUNTERS)&pmcex, sizeof(pmcex))) { return NS_ERROR_FAILURE;
}
*aN = pmcex.PrivateUsage; return NS_OK;
}
# define HAVE_SYSTEM_HEAP_REPORTER 1 // Windows can have multiple separate heaps, but we should not touch non-default // heaps because they may be destroyed at anytime while we hold a handle. So we // count only the default heap.
[[nodiscard]] static nsresult SystemHeapSize(int64_t* aSizeOut) {
HANDLE heap = GetProcessHeap();
NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);
int64_t heapSize = 0;
PROCESS_HEAP_ENTRY entry;
entry.lpData = nullptr; while (HeapWalk(heap, &entry)) { // We don't count entry.cbOverhead, because we just want to measure the // space available to the program. if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
heapSize += entry.cbData;
}
}
// Check this result only after unlocking the heap, so that we don't leave // the heap locked if there was an error.
DWORD lastError = GetLastError();
// I have no idea how things would proceed if unlocking this heap failed...
NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);
SegmentKind mKind; // The segment kind.
uint32_t mCount; // The number of segments of this kind.
size_t mSize; // The combined size of segments of this kind.
};
class WindowsAddressSpaceReporter final : public nsIMemoryReporter {
~WindowsAddressSpaceReporter() {}
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override { // First iterate over all the segments and record how many of each kind // there were and their aggregate sizes. We use a hash table for this // because there are a couple of dozen different kinds possible.
PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
MEMORY_BASIC_INFORMATION info = {0}; bool isPrevSegStackGuard = false; for (size_t currentAddress = 0;;) { if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) { // Something went wrong, just return whatever we've got already. break;
}
size_t size = info.RegionSize;
// Note that |type| and |protect| are ignored in some cases.
DWORD state = info.State;
DWORD type =
(state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0; bool isStack = isPrevSegStackGuard && state == MEM_COMMIT &&
type == MEM_PRIVATE && protect == PAGE_READWRITE;
auto entry = static_cast<const SegmentEntry*>(iter.Get());
nsCString path("address-space");
switch (entry->mKind.mState) { case MEM_FREE:
path.AppendLiteral("/free"); break;
case MEM_RESERVE:
path.AppendLiteral("/reserved");
doType = true; break;
case MEM_COMMIT:
path.AppendLiteral("/commit");
doType = true;
doProtect = true; break;
default: // Should be impossible, but handle it just in case.
path.AppendLiteral("/???"); break;
}
if (doType) { switch (entry->mKind.mType) { case MEM_IMAGE:
path.AppendLiteral("/image"); break;
case MEM_MAPPED:
path.AppendLiteral("/mapped"); break;
case MEM_PRIVATE:
path.AppendLiteral("/private"); break;
default: // Should be impossible, but handle it just in case.
path.AppendLiteral("/???"); break;
}
}
if (doProtect) {
DWORD protect = entry->mKind.mProtect; // Basic attributes. Exactly one of these should be set. if (protect & PAGE_EXECUTE) {
path.AppendLiteral("/execute");
} if (protect & PAGE_EXECUTE_READ) {
path.AppendLiteral("/execute-read");
} if (protect & PAGE_EXECUTE_READWRITE) {
path.AppendLiteral("/execute-readwrite");
} if (protect & PAGE_EXECUTE_WRITECOPY) {
path.AppendLiteral("/execute-writecopy");
} if (protect & PAGE_NOACCESS) {
path.AppendLiteral("/noaccess");
} if (protect & PAGE_READONLY) {
path.AppendLiteral("/readonly");
} if (protect & PAGE_READWRITE) {
path.AppendLiteral("/readwrite");
} if (protect & PAGE_WRITECOPY) {
path.AppendLiteral("/writecopy");
}
// Modifiers. At most one of these should be set. if (protect & PAGE_GUARD) {
path.AppendLiteral("+guard");
} if (protect & PAGE_NOCACHE) {
path.AppendLiteral("+nocache");
} if (protect & PAGE_WRITECOMBINE) {
path.AppendLiteral("+writecombine");
}
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
// Reports the largest contiguous free block of virtual address space.
class VsizeMaxContiguousReporter final : public nsIMemoryReporter {
  ~VsizeMaxContiguousReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n;
    if (NS_FAILED(VsizeMaxContiguousDistinguishedAmount(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    MOZ_COLLECT_REPORT(
        "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, n,
        "Size of the maximum contiguous block of available virtual memory.");
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif
#ifdef HAVE_PRIVATE_REPORTER
// Reports "private" memory: bytes that cannot be shared with other processes.
class PrivateReporter final : public nsIMemoryReporter {
  ~PrivateReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n;
    if (NS_FAILED(PrivateDistinguishedAmount(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    // clang-format off
    MOZ_COLLECT_REPORT(
        "private", KIND_OTHER, UNITS_BYTES, n,
"Memory that cannot be shared with other processes, including memory that is "
"committed and marked MEM_PRIVATE, data that is not mapped, and executable "
"pages that have been written to.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
#endif
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
// Reports the process's total mapped virtual address space ("vsize").
class VsizeReporter final : public nsIMemoryReporter {
  ~VsizeReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n;
    if (NS_FAILED(VsizeDistinguishedAmount(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    // clang-format off
    MOZ_COLLECT_REPORT(
        "vsize", KIND_OTHER, UNITS_BYTES, n,
"Memory mapped by the process, including code and data segments, the heap, "
"thread stacks, memory explicitly mapped by the process via mmap and similar "
"operations, and memory shared with other processes. This is the vsize figure "
"as reported by 'top' and 'ps'. This figure is of limited use on Mac, where "
"processes share huge amounts of memory with one another. But even on other "
"operating systems, 'resident' is a much better measure of the memory "
"resources used by the process.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)
// Reports the resident set size (RSS): pages currently in physical memory.
class ResidentReporter final : public nsIMemoryReporter {
  ~ResidentReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n;
    if (NS_FAILED(ResidentDistinguishedAmount(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    // clang-format off
    MOZ_COLLECT_REPORT(
        "resident", KIND_OTHER, UNITS_BYTES, n,
"Memory mapped by the process that is present in physical memory, also known "
"as the resident set size (RSS). This is the best single figure to use when "
"considering the memory resources used by the process, but it depends both on "
"other processes being run and details of the OS kernel and so is best used "
"for comparing the memory usage of a single process at different points in "
"time.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
#endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
// Reports the unique set size (USS), plus the phys_footprint on macOS.
class ResidentUniqueReporter final : public nsIMemoryReporter {
  ~ResidentUniqueReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n = 0;
    // clang-format off
    if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&n))) {
      MOZ_COLLECT_REPORT(
          "resident-unique", KIND_OTHER, UNITS_BYTES, n,
"Memory mapped by the process that is present in physical memory and not "
"shared with any other processes. This is also known as the process's unique "
"set size (USS). This is the amount of RAM we'd expect to be freed if we "
"closed this process.");
    }
#ifdef XP_MACOSX
    // Additionally report the value Activity Monitor shows in its 'Memory'
    // column; this is emitted even if the USS amount above was unavailable.
    if (NS_SUCCEEDED(PhysicalFootprintAmount(&n))) {
      MOZ_COLLECT_REPORT(
          "resident-phys-footprint", KIND_OTHER, UNITS_BYTES, n,
"Memory footprint reported by MacOS's task_info API's phys_footprint field. "
"This matches the memory column in Activity Monitor.");
    }
#endif
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)
#endif  // HAVE_RESIDENT_UNIQUE_REPORTER
#ifdef HAVE_SYSTEM_HEAP_REPORTER
// Reports memory handed out by the platform's system allocator, i.e. not by
// jemalloc.
class SystemHeapReporter final : public nsIMemoryReporter {
  ~SystemHeapReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n;
    if (NS_FAILED(SystemHeapSize(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    // clang-format off
    MOZ_COLLECT_REPORT(
        "system-heap-allocated", KIND_OTHER, UNITS_BYTES, n,
"Memory used by the system allocator that is currently allocated to the "
"application. This is distinct from the jemalloc heap that Firefox uses for "
"most or all of its heap allocations. Ideally this number is zero, but "
"on some platforms we cannot force every heap allocation through jemalloc.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
#endif  // HAVE_SYSTEM_HEAP_REPORTER
#ifdef XP_UNIX
# include <sys/resource.h>
# define HAVE_RESIDENT_PEAK_REPORTER 1
// Returns the peak resident set size over the process's lifetime, in bytes,
// via getrusage().
[[nodiscard]] static nsresult ResidentPeakDistinguishedAmount(int64_t* aN) {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0) {
    return NS_ERROR_FAILURE;
  }
  // ru_maxrss units vary by platform:
  // - Mac: bytes
  // - Solaris: pages? Some sources say it actually always returns 0, so
  //   check for that below
  // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
# ifdef XP_MACOSX
  *aN = usage.ru_maxrss;
# elif defined(SOLARIS)
  *aN = usage.ru_maxrss * getpagesize();
# else
  *aN = usage.ru_maxrss * 1024;
# endif
  // A zero (or negative) value is meaningless; treat it as a failure.
  return *aN > 0 ? NS_OK : NS_ERROR_FAILURE;
}
// Reports the lifetime peak of the 'resident' measurement.
class ResidentPeakReporter final : public nsIMemoryReporter {
  ~ResidentPeakReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t peak = 0;
    if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&peak))) {
      MOZ_COLLECT_REPORT(
          "resident-peak", KIND_OTHER, UNITS_BYTES, peak,
          "The peak 'resident' value for the lifetime of the process.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)
# define HAVE_PAGE_FAULT_REPORTERS 1
// Reports the cumulative count of soft (minor) page faults via getrusage().
class PageFaultsSoftReporter final : public nsIMemoryReporter {
  ~PageFaultsSoftReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) != 0) {
      // Best-effort: report nothing if the counters can't be read.
      return NS_OK;
    }
    int64_t n = usage.ru_minflt;
    // clang-format off
    MOZ_COLLECT_REPORT(
        "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, n,
"The number of soft page faults (also known as 'minor page faults') that "
"have occurred since the process started. A soft page fault occurs when the "
"process tries to access a page which is present in physical memory but is "
"not mapped into the process's address space. For instance, a process might "
"observe soft page faults when it loads a shared library which is already "
"present in physical memory. A process may experience many thousands of soft "
"page faults even when the machine has plenty of available physical memory, "
"and because the OS services a soft page fault without accessing the disk, "
"they impact performance much less than hard page faults.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)
// Reports the cumulative count of hard (major) page faults.
class PageFaultsHardReporter final : public nsIMemoryReporter {
  ~PageFaultsHardReporter() = default;

 public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override {
    int64_t n = 0;
    if (NS_FAILED(PageFaultsHardDistinguishedAmount(&n))) {
      // Best-effort: report nothing if the amount is unavailable.
      return NS_OK;
    }
    // clang-format off
    MOZ_COLLECT_REPORT(
        "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, n,
"The number of hard page faults (also known as 'major page faults') that have "
"occurred since the process started. A hard page fault occurs when a process "
"tries to access a page which is not present in physical memory. The "
"operating system must access the disk in order to fulfill a hard page fault. "
"When memory is plentiful, you should see very few hard page faults. But if "
"the process tries to use more memory than your machine has available, you "
"may see many thousands of hard page faults. Because accessing the disk is up "
"to a million times slower than accessing RAM, the program may run very "
"slowly when it is experiencing more than 100 or so hard page faults a "
"second.");
    // clang-format on
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
#endif// XP_UNIX
/** ** memory reporter implementation for jemalloc and OSX malloc, ** to obtain info on total memory in use (that we know about, ** at least -- on OSX, there are sometimes other zones in use).
**/
// clang-format off
MOZ_COLLECT_REPORT( "heap/committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated, "Memory mapped by the heap allocator that is currently allocated to the " "application. This may exceed the amount of memory requested by the " "application because the allocator regularly rounds up request sizes. (The " "exact amount requested is not recorded.)");
MOZ_COLLECT_REPORT( "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated, "The same as 'heap/committed/allocated'.");
// We mark this and the other heap/committed/overhead reporters as KIND_NONHEAP // because KIND_HEAP memory means "counted in heap-allocated", which // this is not. for (auto& bin : bin_stats) {
MOZ_ASSERT(bin.size);
nsPrintfCString path("heap/committed/bin-unused/bin-%zu",
bin.size);
aHandleReport->Callback(EmptyCString(), path, KIND_NONHEAP, UNITS_BYTES,
bin.bytes_unused,
nsLiteralCString( "Unused bytes in all runs of all bins for this size class"),
aData);
}
if (stats.waste > 0) {
MOZ_COLLECT_REPORT( "heap/committed/waste", KIND_NONHEAP, UNITS_BYTES,
stats.waste, "Committed bytes which do not correspond to an active allocation and which the " "allocator is not intentionally keeping alive (i.e., not " "'heap/{bookkeeping,unused-pages,bin-unused}').");
}
MOZ_COLLECT_REPORT( "heap/committed/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
stats.bookkeeping, "Committed bytes which the heap allocator uses for internal data structures.");
MOZ_COLLECT_REPORT( "heap/committed/unused-pages/dirty", KIND_NONHEAP, UNITS_BYTES,
stats.pages_dirty, "Memory which the allocator could return to the operating system, but hasn't. " "The allocator keeps this memory around as an optimization, so it doesn't " "have to ask the OS the next time it needs to fulfill a request. This value " "is typically not larger than a few megabytes.");
MOZ_COLLECT_REPORT( "heap/decommitted/unused-pages/fresh", KIND_OTHER, UNITS_BYTES, stats.pages_fresh, "Amount of memory currently mapped but has never been used."); // A duplicate entry in the decommitted part of the tree.
MOZ_COLLECT_REPORT( "decommitted/heap/unused-pages/fresh", KIND_OTHER, UNITS_BYTES, stats.pages_fresh, "Amount of memory currently mapped but has never been used.");
// On MacOS madvised memory is still counted in the resident set until the OS // actually decommits it. #ifdef XP_MACOSX #define MADVISED_GROUP "committed" #else #define MADVISED_GROUP "decommitted" #endif
MOZ_COLLECT_REPORT( "heap/" MADVISED_GROUP "/unused-pages/madvised", KIND_OTHER, UNITS_BYTES,
stats.pages_madvised, "Amount of memory currently mapped, not used and that the OS should remove " "from the application's resident set."); // A duplicate entry in the decommitted part of the tree.
MOZ_COLLECT_REPORT( "decommitted/heap/unused-pages/madvised", KIND_OTHER, UNITS_BYTES, stats.pages_madvised, "Amount of memory currently mapped, not used and that the OS should remove " "from the application's resident set.");
{
size_t decommitted = stats.mapped - stats.allocated - stats.waste - stats.pages_dirty - stats.pages_fresh - stats.bookkeeping - stats.bin_unused;
MOZ_COLLECT_REPORT( "heap/decommitted/unmapped", KIND_OTHER, UNITS_BYTES, decommitted, "Amount of memory currently mapped but not committed, " "neither in physical memory nor paged to disk.");
MOZ_COLLECT_REPORT( "decommitted/heap/decommitted", KIND_OTHER, UNITS_BYTES, decommitted, "Amount of memory currently mapped but not committed, " "neither in physical memory nor paged to disk.");
}
MOZ_COLLECT_REPORT( "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize, "Size of chunks.");
MOZ_COLLECT_REPORT( "explicit/phc/metadata", KIND_NONHEAP, UNITS_BYTES,
usage.mMetadataBytes, "Memory used by PHC to store stacks and other metadata for each allocation");
MOZ_COLLECT_REPORT( "explicit/phc/fragmentation", KIND_NONHEAP, UNITS_BYTES,
usage.mFragmentationBytes, "The amount of memory lost due to rounding up allocations to the next page " "size. " "This is also known as 'internal fragmentation'. " "Note that all allocators have some internal fragmentation, there may still " "be some internal fragmentation without PHC."); #endif
// Why is this here? At first glance, you'd think it could be defined and // registered with nsMemoryReporterManager entirely within nsAtomTable.cpp. // However, the obvious time to register it is when the table is initialized, // and that happens before XPCOM components are initialized, which means the // RegisterStrongMemoryReporter call fails. So instead we do it here. class AtomTablesReporter final : public nsIMemoryReporter {
MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
MOZ_COLLECT_REPORT("explicit/atoms/table", KIND_HEAP, UNITS_BYTES,
sizes.mTable, "Memory used by the atom table.");
MOZ_COLLECT_REPORT( "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
sizes.mDynamicAtoms, "Memory used by dynamic atom objects and chars (which are stored " "at the end of each atom object).");
// Enumerating over active threads requires holding a lock, so we collect // info on all threads, and then call our reporter callbacks after releasing // the lock. struct ThreadData {
nsCString mName;
uint32_t mThreadId;
size_t mPrivateSize;
};
AutoTArray<ThreadData, 32> threads;
#ifdefined(XP_LINUX) int idx = mappings.BinaryIndexOf(thread->StackBase()); if (idx < 0) { continue;
} // Referenced() is the combined size of all pages in the region which // have ever been touched, and are therefore consuming memory. For stack // regions, these pages are guaranteed to be un-shared unless we fork // after creating threads (which we don't).
size_t privateSize = mappings[idx].Referenced();
// On Linux, we have to be very careful matching memory regions to // thread stacks. // // To begin with, the kernel only reports VM stats for regions of all // adjacent pages with the same flags, protection, and backing file. // There's no way to get finer-grained usage information for a subset of // those pages. // // Stack segments always have a guard page at the bottom of the stack // (assuming we only support stacks that grow down), so there's no // danger of them being merged with other stack regions. At the top, // there's no protection page, and no way to allocate one without using // pthreads directly and allocating our own stacks. So we get around the // problem by adding an extra VM flag (NOHUGEPAGES) to our stack region, // which we don't expect to be set on any heap regions. But this is not // fool-proof. // // A second kink is that different C libraries (and different versions // thereof) report stack base locations and sizes differently with // regard to the guard page. For the libraries that include the guard // page in the stack size base pointer, we need to adjust those values // to compensate. But it's possible that our logic will get out of sync // with library changes, or someone will compile with an unexpected // library. // // // The upshot of all of this is that there may be configurations that // our special cases don't cover. And if there are, we want to know // about it. So assert that total size of the memory region we're // reporting actually matches the allocated size of the thread stack. # ifndef ANDROID
MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(), "Mapping region size doesn't match stack allocation size"); # endif #elifdefined(XP_WIN) auto memInfo =
MemoryInfo::Get(thread->StackBase(), thread->StackSize());
size_t privateSize = memInfo.Committed(); #else
size_t privateSize = thread->StackSize();
MOZ_ASSERT_UNREACHABLE( "Shouldn't have stack base pointer on this " "platform"); #endif
nsCString threadName;
thread->GetThreadName(threadName);
threads.AppendElement(ThreadData{
std::move(threadName),
thread->ThreadId(), // On Linux, it's possible (but unlikely) that our stack region will // have been merged with adjacent heap regions, in which case we'll // get combined size information for both. So we take the minimum of // the reported private size and the requested stack size to avoid // the possible of majorly over-reporting in that case.
std::min(privateSize, thread->StackSize()),
});
}
}
aHandleReport->Callback( ""_ns, path, KIND_NONHEAP, UNITS_BYTES, thread.mPrivateSize,
nsLiteralCString("The sizes of thread stacks which have been " "committed to memory."),
aData);
}
MOZ_COLLECT_REPORT("explicit/threads/overhead/event-queues", KIND_HEAP,
UNITS_BYTES, eventQueueSizes, "The sizes of nsThread event queues and observers.");
MOZ_COLLECT_REPORT("explicit/threads/overhead/wrappers", KIND_HEAP,
UNITS_BYTES, wrapperSizes, "The sizes of nsThread/PRThread wrappers.");
#ifdefined(XP_WIN) // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows, // that's 12K. For 64 bit, it's 24K. // // See // https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024; #elifdefined(XP_LINUX) // On Linux, kernel stacks are usually 8K. However, on x86, they are // allocated virtually, and start out at 4K. They may grow to 8K, but we // have no way of knowing which ones do, so all we can do is guess. # ifdefined(__x86_64__) || defined(__i386__)
constexpr size_t kKernelSize = 4 * 1024; # else
constexpr size_t kKernelSize = 8 * 1024; # endif #elifdefined(XP_MACOSX) // On Darwin, kernel stacks are 16K: // // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
constexpr size_t kKernelSize = 16 * 1024; #else // Elsewhere, just assume that kernel stacks require at least 8K.
constexpr size_t kKernelSize = 8 * 1024; #endif
MOZ_COLLECT_REPORT("explicit/threads/overhead/kernel", KIND_NONHEAP,
UNITS_BYTES, threadCount * kKernelSize, "The total kernel overhead for all active threads.");
// Ideally, this would be implemented in BlockingResourceBase.cpp. // However, this ends up breaking the linking step of various unit tests due // to adding a new dependency to libdmd for a commonly used feature (mutexes) // in DMD builds. So instead we do it here. class DeadlockDetectorReporter final : public nsIMemoryReporter {
MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
~DeadlockDetectorReporter() = default;
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) override {
MOZ_COLLECT_REPORT( "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf), "Memory used by the deadlock detector.");
MOZ_COLLECT_REPORT( "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
sizes.mStackTracesUsed, "Memory used by stack traces which correspond to at least " "one heap block DMD is tracking.");
MOZ_COLLECT_REPORT( "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
sizes.mStackTracesUnused, "Memory used by stack traces which don't correspond to any heap " "blocks DMD is currently tracking.");
MOZ_COLLECT_REPORT("explicit/dmd/stack-traces/table", KIND_HEAP,
UNITS_BYTES, sizes.mStackTraceTable, "Memory used by DMD's stack trace table.");
MOZ_COLLECT_REPORT("explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
sizes.mLiveBlockTable, "Memory used by DMD's live block table.");
MOZ_COLLECT_REPORT("explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
sizes.mDeadBlockTable, "Memory used by DMD's dead block list.");
NS_IMETHODIMP
nsMemoryReporterManager::Init() { if (!NS_IsMainThread()) {
MOZ_CRASH();
}
// Under normal circumstances this function is only called once. However, // we've (infrequently) seen memory report dumps in crash reports that // suggest that this function is sometimes called multiple times. That in // turn means that multiple reporters of each kind are registered, which // leads to duplicated reports of individual measurements such as "resident", // "vsize", etc. // // It's unclear how these multiple calls can occur. The only plausible theory // so far is badly-written extensions, because this function is callable from // JS code via nsIMemoryReporter.idl. // // Whatever the cause, it's a bad thing. So we protect against it with the // following check. staticbool isInited = false; if (isInited) {
NS_WARNING("nsMemoryReporterManager::Init() has already been called!"); return NS_OK;
}
isInited = true;
{ // Add a series of low-level reporters meant to be executed in order and // before any other reporters. These reporters are never released until // the manager dies (at process shutdown). Note that this currently only // works for reporters expecting to be executed sync. // // Note that we explicitly handle our self-reporting inside // GetReportsForThisProcessExtended, such that we do not need to register // ourself to any array/table here.
#ifdef MOZ_GECKO_PROFILER // We have to register this here rather than in profiler_init() because // profiler_init() runs prior to nsMemoryReporterManager's creation.
mStrongEternalReporters->AppendElement(new GeckoProfilerReporter()); #endif
nsMemoryReporterManager::~nsMemoryReporterManager() {
  // Any reporter lists stashed aside (e.g. by test-only swap operations) must
  // have been restored before the manager is destroyed, otherwise reporters
  // would be silently lost.
  NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
  NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
  NS_ASSERTION(!mSavedStrongEternalReporters,
               "failed to restore eternal reporters");
}
NS_IMETHODIMP
nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
nsISupports* aData, bool aAnonymize) {
size_t n = MallocSizeOf(this);
{
mozilla::MutexAutoLock autoLock(mMutex);
n += mStrongEternalReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf); // Note that we do not include the mSaved<X>Reporters here, as during // normal operations they are always nullptr and during testing we want to // hide the saved variants entirely.
}
MOZ_COLLECT_REPORT("explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
n, "Memory used by the memory reporter infrastructure.");
// Memory reporters are not necessarily threadsafe, so this function must // be called from the main thread. if (!NS_IsMainThread()) {
MOZ_CRASH();
}
uint32_t generation = mNextGeneration++;
if (mPendingProcessesState) { // A request is in flight. Don't start another one. And don't report // an error; just ignore it, and let the in-flight request finish.
MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n", generation,
mPendingProcessesState->mGeneration); return NS_OK;
}
// MainThread only
nsresult nsMemoryReporterManager::StartGettingReports() {
PendingProcessesState* s = mPendingProcessesState;
nsresult rv;
// Get reports for this process.
FILE* parentDMDFile = nullptr; #ifdef MOZ_DMD if (!s->mDMDDumpIdent.IsEmpty()) {
rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
&parentDMDFile); if (NS_WARN_IF(NS_FAILED(rv))) { // Proceed with the memory report as if DMD were disabled.
parentDMDFile = nullptr;
}
} #endif
// This is async.
GetReportsForThisProcessExtended(
s->mHandleReport, s->mHandleReportData, s->mAnonymize, parentDMDFile,
s->mFinishReporting, s->mFinishReportingData);
nsTArray<dom::ContentParent*> childWeakRefs;
dom::ContentParent::GetAll(childWeakRefs); if (!childWeakRefs.IsEmpty()) { // Request memory reports from child processes. This happens // after the parent report so that the parent's main thread will // be free to process the child reports, instead of causing them // to be buffered and consume (possibly scarce) memory.
for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
s->mChildrenPending.AppendElement(childWeakRefs[i]);
}
}
if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) { if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
s->mChildrenPending.AppendElement(proc.forget());
}
}
if (RDDProcessManager* rdd = RDDProcessManager::Get()) { if (RefPtr<MemoryReportingProcess> proc = rdd->GetProcessMemoryReporter()) {
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.34 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.