/* * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/ #include"precompiled.hpp" #include"logging/log.hpp" #include"memory/metaspaceUtils.hpp" #include"memory/metaspaceStats.hpp" #include"runtime/os.hpp" #include"runtime/threadCritical.hpp" #include"services/memTracker.hpp" #include"services/threadStackTracker.hpp" #include"services/virtualMemoryTracker.hpp" #include"utilities/ostream.hpp"
// One-time setup of the summary: construct the global VirtualMemorySnapshot
// inside the statically allocated _snapshot buffer.
void VirtualMemorySummary::initialize() {
  // _snapshot is a raw static byte area; make sure it can hold the object.
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
// Copy the current global snapshot into *s. Thread stacks that are tracked
// as virtual memory are re-snapshotted first so the copy is up to date.
void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Only if thread stack is backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    // Snapshot current thread stacks
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}
staticbool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) { // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions. return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}
// NOTE(review): fragment — the enclosing function's signature (presumably a
// ReservedMemoryRegion committed-region add taking addr/size/stack) is not
// visible in this chunk; the code below runs to the function's closing brace.
// Find the region that fully precedes the [addr, addr + size) region.
LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

if (next != NULL) {
  // Ignore request if region already exists.
  if (is_same_as(next->data(), addr, size, stack)) {
    return true;
  }

  // The new region is after prev, and either overlaps with the
  // next region (and maybe more regions), or overlaps with no region.
  if (next->data()->overlap_region(addr, size)) {
    // Remove _all_ overlapping regions, and parts of regions,
    // in preparation for the addition of this new region.
    remove_uncommitted_region(addr, size);

    // The remove could have split a region into two and created a
    // new prev region. Need to reset the prev and next pointers.
    prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
    next = (prev != NULL ? prev->next() : _committed_regions.head());
  }
}

// At this point the previous overlapping regions have been
// cleared, and the full region is guaranteed to be inserted.
VirtualMemorySummary::record_committed_memory(size, flag());

// Try to merge with prev and possibly next.
if (try_merge_with(prev, addr, size, stack)) {
  if (try_merge_with(prev, next)) {
    // prev was expanded to contain the new region
    // and next, need to remove next from the list
    _committed_regions.remove_after(prev);
  }
  return true;
}

// Didn't merge with prev, try with next.
if (try_merge_with(next, addr, size, stack)) {
  return true;
}

// Couldn't merge with any regions - create a new region.
return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}
// NOTE(review): fragment — the enclosing function's signature (a helper that
// removes [addr, addr + size) from the committed region held by 'node') and
// its tail are not visible in this chunk.
CommittedMemoryRegion* rgn = node->data();
assert(rgn->contain_region(addr, size), "Has to be contained");
assert(!rgn->same_region(addr, size), "Can not be the same region");

if (rgn->base() == addr ||
    rgn->end() == addr + size) {
  // The range touches one edge of rgn: shrink rgn in place.
  rgn->exclude_region(addr, size);
  return true;
} else {
  // split this region
  address top = rgn->end();
  // use this region for lower part
  size_t exclude_size = rgn->end() - addr;
  rgn->exclude_region(addr, exclude_size);
  // higher part
  address high_base = addr + size;
  size_t high_size = top - high_base;
// NOTE(review): fragment — the enclosing function's signature (presumably
// ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz))
// and the loop's advance/final-return tail are not visible in this chunk.
// Region to be removed, tagged with this reserved region's call stack.
CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
address end = addr + sz;

LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
LinkedListNode<CommittedMemoryRegion>* prev = NULL;
CommittedMemoryRegion* crgn;

while (head != NULL) {
  crgn = head->data();

  if (crgn->same_region(addr, sz)) {
    // Exact match: account the uncommit and unlink the node.
    VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
    _committed_regions.remove_after(prev);
    return true;
  }

  // del_rgn contains crgn
  if (del_rgn.contain_region(crgn->base(), crgn->size())) {
    VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
    head = head->next();
    _committed_regions.remove_after(prev);
    continue;  // don't update head or prev
  }

  // Found addr in the current crgn. There are 2 subcases:
  if (crgn->contain_address(addr)) {
    // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
    if (crgn->contain_address(end - 1)) {
      VirtualMemorySummary::record_uncommitted_memory(sz, flag());
      return remove_uncommitted_region(head, addr, sz);  // done!
    } else {
      // (2) Did not find del_rgn's end in crgn.
      size_t size = crgn->end() - del_rgn.base();
      crgn->exclude_region(addr, size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
    }
  } else if (crgn->contain_address(end - 1)) {
    // Found del_rgn's end, but not its base addr.
    size_t size = del_rgn.end() - crgn->base();
    crgn->exclude_region(crgn->base(), size);
    VirtualMemorySummary::record_uncommitted_memory(size, flag());
    return true;  // should be done if the list is sorted properly!
  }
// NOTE(review): fragment — conflict handling for an already-recorded reserved
// region (presumably inside VirtualMemoryTracker::add_reserved_region); the
// enclosing signature and surrounding context are not visible in this chunk.
// Overlapped reservation.
// It can happen when the regions are thread stacks, as JNI
// thread does not detach from VM before exits, and leads to
// leak JavaThread object
if (reserved_rgn->flag() == mtThreadStack) {
  guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
  // Overwrite with new region

  // Release old region
  VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
  VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

  // Add new region
  VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

  *reserved_rgn = rgn;
  return true;
}

// CDS mapping region.
// CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
// NMT reports CDS as a whole.
if (reserved_rgn->flag() == mtClassShared) {
  log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                 reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
  assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
  return true;
}

// Mapped CDS string region.
// The string region(s) is part of the java heap.
if (reserved_rgn->flag() == mtJavaHeap) {
  log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
                 reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
  assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
  return true;
}

// Print some more details. Don't use UL here to avoid circularities.
#ifdef ASSERT
tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
              " new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
              p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
              p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
#endif
ShouldNotReachHere();
return false;
}
}
}
// NOTE(review): fragment — tail of a region-release routine (presumably
// VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion*));
// the enclosing signature is not visible in this chunk.
// Fix: the second log message said "_resvered_regions" — corrected to
// "_reserved_regions" to match the actual field name.
// uncommit regions within the released region
ReservedMemoryRegion backup(*rgn);
bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
               backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
if (!result) {
  return false;
}

// Account the release before removing the bookkeeping entry.
VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
result = _reserved_regions->remove(*rgn);
log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s" ,
               backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
return result;
}
// NOTE(review): fragment — interior of a release handler (presumably
// VirtualMemoryTracker::remove_released_region(address addr, size_t size));
// the enclosing signature is not visible in this chunk.
if (reserved_rgn == NULL) {
  log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")!",
                 p2i(rgn.base()), rgn.size());
}
assert(reserved_rgn != NULL, "No reserved region");
if (reserved_rgn->same_region(addr, size)) {
  // Exact match: release the whole reserved region.
  return remove_released_region(reserved_rgn);
}

// uncommit regions within the released region
if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
  return false;
}

if (reserved_rgn->flag() == mtClassShared) {
  if (reserved_rgn->contain_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  if (size > reserved_rgn->size()) {
    // This is from release the whole region spanning from archive space to class space,
    // so we release them altogether.
    ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                   (size - reserved_rgn->size()));
    ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
    assert(cls_rgn != NULL, "Class space region not recorded?");
    assert(cls_rgn->flag() == mtClass, "Must be class type");
    remove_released_region(reserved_rgn);
    remove_released_region(cls_rgn);
    return true;
  }
}

// NOTE(review): the lines that compute 'top' and construct 'high_rgn' for
// the middle-of-region release (split) case appear to be missing from this
// chunk; the code below continues mid-logic.
// use original region for lower region
reserved_rgn->exclude_region(addr, top - addr);
LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
if (new_rgn == NULL) {
  return false;
} else {
  reserved_rgn->move_committed_regions(addr, *new_rgn->data());
  return true;
}
}
}
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory flags of the original section.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split) {
// NOTE(review): the lines that look up 'reserved_rgn' / 'rgn' and capture
// 'original_stack' / 'original_flags' appear to be missing from this chunk.
const char* name = reserved_rgn->flag_name();
// Remove the original mapping before re-registering the two halves.
remove_released_region(reserved_rgn);
log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT,
               name, p2i(rgn.base()), rgn.size(), split);
// Now, create two new regions.
add_reserved_region(addr, split, original_stack, original_flags);
add_reserved_region(addr + split, size - split, original_stack, original_flags);
return true;
}
// Iterate the range, find committed region within its bound. class RegionIterator : public StackObj { private: const address _start; const size_t _size;
// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  // For each reserved region flagged as a thread stack, probe which parts of
  // the stack are actually committed and record them on the region.
  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      // const_cast: we mutate the region's committed-region list below.
      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != NULL, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // unaligned stack_size case: correct the region to fit the actual stack_size
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};
// NOTE(review): fragment — method of a region-printing walker; its class
// declaration (with members _p and _st) is not visible in this chunk.
// Print details about the region containing _p, if any. Returning false
// stops the walk once the containing region has been found and printed.
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
  if (rgn->contain_address(_p)) {
    _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "] by %s",
                  p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), rgn->flag_name());
    if (MemTracker::tracking_level() == NMT_detail) {
      // In detail mode, also print the allocation call stack.
      rgn->call_stack()->print_on(_st);
      _st->cr();
    }
    return false;
  }
  return true;
}
};
// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  // The walker returns false (stopping the walk) when it finds and prints
  // the containing region, so invert the walk's result.
  return !walk_virtual_memory(&walker);
// NOTE(review): the function's closing brace is not visible in this chunk.
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.