/* * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { // Insert segments in address order, merge segments when possible for (int i = _segments.length(); i > 0; i--) { constint current = i - 1;
if (_segments.at(current).end() <= segment.start()) { if (is_mergable(_segments.at(current), segment)) { if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { // Merge with end of current segment and start of next segment const size_t start = _segments.at(current).start(); const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size();
replace_segment(current, start, size, segment.is_committed());
remove_segment(current + 1); return;
}
// Merge with end of current segment const size_t start = _segments.at(current).start(); const size_t size = _segments.at(current).size() + segment.size();
replace_segment(current, start, size, segment.is_committed()); return;
} elseif (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { // Merge with start of next segment const size_t start = segment.start(); const size_t size = segment.size() + _segments.at(current + 1).size();
replace_segment(current + 1, start, size, segment.is_committed()); return;
}
// Insert after current segment
insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed()); return;
}
}
if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) { // Merge with start of first segment const size_t start = segment.start(); const size_t size = segment.size() + _segments.at(0).size();
replace_segment(0, start, size, segment.is_committed()); return;
}
// Insert before first segment
insert_segment(0, segment.start(), segment.size(), segment.is_committed());
}
// Removes every committed segment from this ZPhysicalMemory and returns
// them collected in a new ZPhysicalMemory. Uncommitted segments remain,
// compacted towards the front of _segments.
ZPhysicalMemory ZPhysicalMemory::split_committed() {
  ZPhysicalMemory committed;
  int num_kept = 0;

  for (int i = 0; i < _segments.length(); i++) {
    const ZPhysicalMemorySegment& seg = _segments.at(i);
    if (!seg.is_committed()) {
      // Keep segment, shifting it down over any transferred ones
      _segments.at_put(num_kept++, seg);
    } else {
      // Transfer segment to the result
      committed.add_segment(seg);
    }
  }

  // Drop the now-unused tail slots
  _segments.trunc_to(num_kept);

  return committed;
}
// Constructs the physical memory manager for a heap of at most
// max_capacity bytes and registers the entire offset range
// [0, max_capacity) as free in the backing range manager.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Make the whole range free
  _manager.free(0, max_capacity);
}
// NOTE(review): the enclosing function header is missing from this chunk of
// the file -- in upstream ZGC this logic lives in a try-enable-uncommit
// routine of ZPhysicalMemoryManager. Confirm against the full file before
// relying on this placement.
//
// If uncommit is not explicitly disabled, max capacity is greater than
// min capacity, and uncommit is supported by the platform, then uncommit
// will be enabled.
if (!ZUncommit) {
  // Explicitly disabled via the ZUncommit flag
  log_info_p(gc, init)("Uncommit: Disabled");
  return;
}

// Test if uncommit is supported by the operating system by committing
// and then uncommitting a granule.
ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false/* committed */));
if (!commit(pmem) || !uncommit(pmem)) {
  // Probe failed: turn the flag off ergonomically so later code sees it disabled
  log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
  FLAG_SET_ERGO(ZUncommit, false);
  return;
}
// Reports a committed heap range to Native Memory Tracking.
// From an NMT point of view we treat the first heap view (marked0)
// as the committed one.
void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
  const uintptr_t marked0_addr = ZAddress::marked0(offset);
  MemTracker::record_virtual_memory_commit((void*)marked0_addr, size, CALLER_PC);
}
// NOTE(review): the enclosing function header is missing from this chunk --
// presumably a map/view helper of ZPhysicalMemoryManager taking (addr, pmem);
// 'addr' and 'size' are not declared in the visible lines. Confirm against
// the full file.
//
// Map segments
for (int i = 0; i < pmem.nsegments(); i++) {
  const ZPhysicalMemorySegment& segment = pmem.segment(i);
  // Map this physical segment contiguously after the previously mapped ones
  _backing.map(addr + size, segment.size(), segment.start());
  size += segment.size();
}

// Setup NUMA interleaving for large pages
if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
  // To get granule-level NUMA interleaving when using large pages,
  // we simply let the kernel interleave the memory for us at page
  // fault time.
  os::numa_make_global((char*)addr, size);
}
} // closes the enclosing function (header not visible in this chunk)
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.