/* * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Support for mapped heap. bool ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;
// Every mapped region is offset by _mapped_heap_delta from its requested address. // See FileMapInfo::heap_region_requested_address(). void ArchiveHeapLoader::init_mapped_heap_relocation(ptrdiff_t delta, int dumptime_oop_shift) {
assert(!_mapped_heap_relocation_initialized, "only once"); if (!UseCompressedOops) {
assert(dumptime_oop_shift == 0, "sanity");
}
assert(can_map(), "sanity");
init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
_mapped_heap_delta = delta;
_mapped_heap_relocation_initialized = true;
}
void ArchiveHeapLoader::fixup_regions() {
FileMapInfo* mapinfo = FileMapInfo::current_info(); if (is_mapped()) {
mapinfo->fixup_mapped_heap_regions();
} elseif (_loading_failed) {
fill_failed_loaded_heap();
} if (is_fully_available()) { if (!MetaspaceShared::use_full_module_graph()) { // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
ClassLoaderDataShared::clear_archived_oops();
}
}
}
// ------------------ Support for Region MAPPING -----------------------------------------
// Patch all the embedded oop pointers inside an archived heap region, // to be consistent with the runtime oop encoding. class PatchCompressedEmbeddedPointers: public BitMapClosure {
narrowOop* _start;
bool do_bit(size_t offset) {
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
oop o = ArchiveHeapLoader::decode_from_archive(v);
RawAccess<IS_NOT_NULL>::oop_store(p, o); returntrue;
}
};
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
oop* _start;
bool do_bit(size_t offset) {
oop* p = _start + offset;
intptr_t dumptime_oop = (intptr_t)((void*)*p);
assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop)); returntrue;
}
};
// Patch all the non-null pointers that are embedded in the archived heap objects // in this (mapped) region void ArchiveHeapLoader::patch_embedded_pointers(MemRegion region, address oopmap,
// NOTE(review): the line above has the function signature swallowed into a
// '//' comment by a formatting accident, and the rest of the body (after the
// BitMapView construction below) is missing from this copy of the file —
// restore from the pristine source; do not hand-reconstruct.
size_t oopmap_size_in_bits) {
  // View over the region's oopmap: one bit per embedded-oop slot.
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
// ------------------ Support for Region LOADING -----------------------------------------
// The CDS archive remembers each heap object by its address at dump time, but // the heap object may be loaded at a different address at run time. This structure is used // to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index) // to their runtime addresses. struct LoadedArchiveHeapRegion { int _region_index; // index for FileMapInfo::space_at(index)
size_t _region_size; // number of bytes in this region
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
intx _runtime_offset; // If an object's dump time address P is within in this region, its // runtime address is P + _runtime_offset
// NOTE(review): these static_asserts (and the do_bit() that follows) are
// members of a BitMapClosure subclass whose declaration — including
// NUM_LOADED_REGIONS and the _base_*/_offset_*/_top/_start fields — is
// missing from this copy of the file; restore from the pristine source.
static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
// Relocate one embedded narrow-oop slot of a *loaded* region: decode the
// dump-time value, find which loaded region it falls in, and add that
// region's runtime offset.
bool do_bit(size_t offset) {
  assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
  narrowOop* p = _start + offset;
  narrowOop v = *p;
  assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
  uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
  assert(_base_0 <= o && o < _top, "must be");

  // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
  if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
    o += _offset_3;
  } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {   // was "elseif"
    o += _offset_2;
  } else if (o >= _base_1) {                             // was "elseif"
    o += _offset_1;
  } else {
    o += _offset_0;
  }
  ArchiveHeapLoader::assert_in_loaded_heap(o);
  RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
  return true;   // was the invalid token "returntrue"
}
};
// Collect the non-empty archived heap regions from the mapfile into
// loaded_regions[], recording each region's index, size, and dump-time base
// address.
// NOTE(review): this function appears truncated in this copy — the code after
// the loop (presumably the computation of archive_space and the return of the
// region count) and the closing brace are missing; restore from the pristine
// source.
int ArchiveHeapLoader::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
                                           MemRegion& archive_space) {
  size_t total_bytes = 0; int num_loaded_regions = 0; for (int i = MetaspaceShared::first_archive_heap_region;
       i <= MetaspaceShared::last_archive_heap_region; i++) {
    FileMapRegion* r = mapinfo->region_at(i);
    // Only regions that actually contain data are recorded.
    r->assert_is_heap_region(); if (r->used() > 0) {
      assert(is_aligned(r->used(), HeapWordSize), "must be");
      total_bytes += r->used();
      LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
      ri->_region_index = i;
      ri->_region_size = r->used();
      ri->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address(r);
    }
  }
// Sort the loaded regions by dump-time base address, then compute for each
// one the delta between its dump-time address and the runtime address it
// will occupy: regions are laid out back-to-back starting at 'buffer'.
void ArchiveHeapLoader::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
                                            uintptr_t buffer) {
  // Find the relocation offset of the pointers in each region
  qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
        LoadedArchiveHeapRegion::comparator);

  uintptr_t load_addr = buffer;
  for (int idx = 0; idx < num_loaded_regions; idx++) {
    LoadedArchiveHeapRegion* region = &loaded_regions[idx];
    // This region will be loaded at load_addr, so every object inside it is
    // shifted by exactly this amount.
    region->_runtime_offset = load_addr - region->_dumptime_base;
    load_addr += region->_region_size;
  }

  assert(load_addr == _loaded_heap_top, "must be");
}
// Read each archived heap region from the mapfile into the pre-allocated
// 'buffer'; sets _loading_failed and returns false if the bitmap region
// cannot be mapped or any region read fails.
// NOTE(review): the two occurrences of "returnfalse" below are mangled
// "return false;" statements, and the tail of this function (after the
// log_info call) is missing from this copy — restore from the pristine
// source; do not hand-reconstruct.
bool ArchiveHeapLoader::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions, uintptr_t buffer) {
  // The bitmap region holds the oopmaps needed to patch embedded pointers.
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region(); if (bitmap_base == 0) {
    _loading_failed = true; returnfalse; // OOM or CRC error
  }
  uintptr_t load_address = buffer; for (int i = 0; i < num_loaded_regions; i++) {
    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
    FileMapRegion* r = mapinfo->region_at(ri->_region_index);
    if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) { // There's no easy way to free the buffer, so we will fill it with zero later // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
      log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
      _loading_failed = true; returnfalse;
    }
    log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                  ri->_region_index, load_address, load_address + ri->_region_size,
                  ri->_region_size, ri->_runtime_offset);
virtualvoid do_oop(narrowOop* p) { // This should be called before the loaded regions are modified, so all the embedded pointers // must be NULL, or must point to a valid object in the loaded regions.
narrowOop v = *p; if (!CompressedOops::is_null(v)) {
oop o = CompressedOops::decode_not_null(v);
uintptr_t u = cast_from_oop<uintptr_t>(o);
ArchiveHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
}
} virtualvoid do_oop(oop* p) { // Uncompressed oops are not supported by loaded heaps.
Unimplemented();
}
};
// Post-initialization work for the archived heap: extra fix-up (and optional
// verification) for a *loaded* heap, then native-pointer patching in all cases.
void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  patch_native_pointers();
}
// Relocate one embedded native (Metadata*) pointer by the metaspace
// relocation delta.
bool do_bit(size_t offset) {
  Metadata** p = _start + offset;
  *p = (Metadata*)(address(*p) + MetaspaceShared::relocation_delta());
  // Currently we have only Klass pointers in heap objects.
  // This needs to be relaxed when we support other types of native
  // pointers such as Method.
  assert(((Klass*)(*p))->is_klass(), "must be");
  return true;   // was the invalid token "returntrue"
}
};
void ArchiveHeapLoader::patch_native_pointers() { if (MetaspaceShared::relocation_delta() == 0) { return;
}
for (int i = MetaspaceShared::first_archive_heap_region;
i <= MetaspaceShared::last_archive_heap_region; i++) {
FileMapRegion* r = FileMapInfo::current_info()->region_at(i); if (r->mapped_base() != NULL && r->has_ptrmap()) {
log_info(cds, heap)("Patching native pointers in heap region %d", i);
BitMapView bm = r->ptrmap_view();
PatchNativePointers patcher((Metadata**)r->mapped_base());
bm.iterate(&patcher);
}
}
} #endif// INCLUDE_CDS_JAVA_HEAP
Messung V0.5
¤ Dauer der Verarbeitung: 0.2 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.