/* * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) { // Save this source object for copying
_objs->append(src_info);
// Prepare for marking the pointers in this source object
assert(is_aligned(_total_bytes, sizeof(address)), "must be");
src_info->set_ptrmap_start(_total_bytes / sizeof(address));
_total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
src_info->set_ptrmap_end(_total_bytes / sizeof(address));
void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) { // src_obj contains a pointer. Remember the location of this pointer in _ptrmap, // so that we can copy/relocate it later. E.g., if we have // class Foo { intx scala; Bar* ptr; } // Foo *f = 0x100; // To mark the f->ptr pointer on 64-bit platform, this function is called with // src_info()->obj() == 0x100 // ref->addr() == 0x108
address src_obj = src_info->obj();
address* field_addr = ref->addr();
assert(src_info->ptrmap_start() < _total_bytes, "sanity");
assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
assert(*field_addr != NULL, "should have checked");
if (DumpSharedSpaces) { // To ensure deterministic contents in the static archive, we need to ensure that // we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where // the MetaspaceObjs are located originally, as they are copied sequentially into // the archive during the iteration. // // The only issue here is that the symbol table and the system directories may be // randomly ordered, so we copy the symbols and klasses into two arrays and sort // them deterministically. // // During -Xshare:dump, the order of Symbol creation is strictly determined by // the SharedClassListFile (class loading is done in a single thread and the JIT // is disabled). Also, Symbols are allocated in monotonically increasing addresses // (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by // ascending address order, we ensure that all Symbols are copied into deterministic // locations in the archive. // // TODO: in the future, if we want to produce deterministic contents in the // dynamic archive, we might need to sort the symbols alphabetically (also see // DynamicArchiveBuilder::sort_methods()).
sort_symbols_and_fix_hash();
sort_klasses();
// TODO -- we need a proper estimate for the archived modules, etc, // but this should be enough for now
_estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
}
}
// Comparator used to sort _symbols by ascending address. The symbol array
// must not contain duplicates, so equal pointers trigger the assert.
int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
  if (*a < *b) {
    return -1;
  }
  assert(*a > *b, "Duplicated symbol %s unexpected", (*a)->as_C_string());
  return 1;
}
void ArchiveBuilder::sort_symbols_and_fix_hash() {
log_info(cds)("Sorting symbols and fixing identity hash ... ");
os::init_random(0x12345678);
_symbols->sort(compare_symbols_by_address); for (int i = 0; i < _symbols->length(); i++) {
assert(_symbols->at(i)->is_permanent(), "archived symbols must be permanent");
_symbols->at(i)->update_identity_hash();
}
}
// Comparator for deterministic sorting of _klasses by class name.
int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
  Symbol* name_a = (*a)->name();
  Symbol* name_b = (*b)->name();
  return name_a->fast_compare(name_b);
}
size_t ArchiveBuilder::estimate_archive_size() { // size of the symbol table and two dictionaries, plus the RunTimeClassInfo's
size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
_estimated_hashtable_bytes = symbol_table_est + dictionary_est;
size_t total = 0;
total += _estimated_metaspaceobj_bytes;
total += _estimated_hashtable_bytes;
// allow fragmentation at the end of each dump region
total += _total_dump_regions * MetaspaceShared::core_region_alignment();
// Reserve the temporary output buffer into which the rw/ro core regions will
// be copied, and compute the "requested" addresses at which the archive
// should be mapped at runtime. Returns the bottom (lowest address) of the
// reservation. Logs an error and terminates the VM if the reservation fails
// or if the requested addresses would overflow past the static archive.
address ArchiveBuilder::reserve_buffer() {
  size_t buffer_size = estimate_archive_size();
  ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size()); if (!rs.is_reserved()) {
    log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
    os::_exit(0);
  }
  // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
  // we are copying the class metadata into the buffer.
  address buffer_bottom = (address)rs.base();
  log_info(cds)("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
                p2i(buffer_bottom), buffer_size);
  _shared_rs = rs;
  // The bottom of the static archive should be mapped at this address by default.
  _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
  address my_archive_requested_bottom;
  // At run time, we will mmap the dynamic archive at my_archive_requested_bottom.
  // NOTE(review): static_archive_size is not declared anywhere in this view; it
  // presumably comes from surrounding code (e.g. the currently-mapped static
  // archive's size during dynamic dump) — confirm against the full file.
  _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
  my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::core_region_alignment());
  address my_archive_requested_top = my_archive_requested_bottom + buffer_size; if (my_archive_requested_bottom < _requested_static_archive_bottom ||
      my_archive_requested_top <= _requested_static_archive_bottom) {
    // Size overflow: the archive cannot be placed above the static archive.
    log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
    log_error(cds)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top));
    log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. " "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
    os::_exit(0);
  }
  if (DumpSharedSpaces) {
    // We don't want any valid object to be at the very bottom of the archive.
    // See ArchivePtrMarker::mark_pointer().
    rw_region()->allocate(16);
  }
  return buffer_bottom;
}
// Push every sorted root (symbols, klasses, then subclass-specific roots)
// into the given closure.
//
// In the original extraction, the declaration of num_symbols and the symbol
// loop header were accidentally fused into the trailing line comment,
// commenting the code out; the structure is restored here.
//
// When is_relocating_pointers is true, _symbols is skipped: we don't relocate
// _symbols, so we can safely call decrement_refcount on the original symbols.
void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
  int i;
  if (!is_relocating_pointers) {
    // Don't relocate _symbols, so we can safely call decrement_refcount on the
    // original symbols.
    int num_symbols = _symbols->length();
    for (i = 0; i < num_symbols; i++) {
      it->push(_symbols->adr_at(i));
    }
  }
  int num_klasses = _klasses->length();
  for (i = 0; i < num_klasses; i++) {
    it->push(_klasses->adr_at(i));
  }
  iterate_roots(it, is_relocating_pointers);
}
class GatherSortedSourceObjs : public MetaspaceClosure {
ArchiveBuilder* _builder;
if (created && src_info.should_copy()) {
ref->set_user_data((void*)p); if (read_only) {
_ro_src_objs.append(enclosing_ref, p);
} else {
_rw_src_objs.append(enclosing_ref, p);
} returntrue; // Need to recurse into this ref only if we are copying it
} else { returnfalse;
}
}
if (enclosing_ref != NULL) {
SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data(); if (src_info == NULL) { // source objects of point_to_it/set_to_null types are not copied // so we don't need to remember their pointers.
} else { if (src_info->read_only()) {
_ro_src_objs.remember_embedded_pointer(src_info, ref);
} else {
_rw_src_objs.remember_embedded_pointer(src_info, ref);
}
}
}
}
oldtop = dump_region->top(); if (ref->msotype() == MetaspaceObj::ClassType) { // Save a pointer immediate in front of an InstanceKlass, so // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo* // without building another hashtable. See RunTimeClassInfo::get_for() // in systemDictionaryShared.cpp.
Klass* klass = (Klass*)src; if (klass->is_instance_klass()) {
SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
dump_region->allocate(sizeof(address));
}
}
dest = dump_region->allocate(bytes);
newtop = dump_region->top();
// After the source objects have been copied into the buffer, relocate the
// pointers embedded inside the copied rw and ro objects, then process the
// special references recorded earlier (see update_special_refs).
void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
  log_info(cds)("Relocating embedded pointers in core regions ... ");
  relocate_embedded_pointers(&_rw_src_objs);
  relocate_embedded_pointers(&_ro_src_objs);
  update_special_refs();
}
// We must relocate vmClasses::_klasses[] only after we have copied the // java objects in during dump_java_heap_objects(): during the object copy, we operate on // old objects which assert that their klass is the original klass. void ArchiveBuilder::relocate_vm_classes() {
log_info(cds)("Relocating vmClasses::_klasses[] ... ");
ResourceMark rm;
RefRelocator doit(this);
vmClasses::metaspace_pointers_do(&doit);
}
void ArchiveBuilder::make_klasses_shareable() { int num_instance_klasses = 0; int num_boot_klasses = 0; int num_platform_klasses = 0; int num_app_klasses = 0; int num_hidden_klasses = 0; int num_unlinked_klasses = 0; int num_unregistered_klasses = 0; int num_obj_array_klasses = 0; int num_type_array_klasses = 0;
for (int i = 0; i < klasses()->length(); i++) { constchar* type; constchar* unlinked = ""; constchar* hidden = ""; constchar* generated = "";
Klass* k = klasses()->at(i);
k->remove_java_mirror(); if (k->is_objArray_klass()) { // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info // on their array classes.
num_obj_array_klasses ++;
type = "array";
} elseif (k->is_typeArray_klass()) {
num_type_array_klasses ++;
type = "array";
k->remove_unshareable_info();
} else {
assert(k->is_instance_klass(), " must be");
num_instance_klasses ++;
InstanceKlass* ik = InstanceKlass::cast(k); if (DynamicDumpSharedSpaces) { // For static dump, class loader type are already set.
ik->assign_class_loader_type();
} if (ik->is_shared_boot_class()) {
type = "boot";
num_boot_klasses ++;
} elseif (ik->is_shared_platform_class()) {
type = "plat";
num_platform_klasses ++;
} elseif (ik->is_shared_app_class()) {
type = "app";
num_app_klasses ++;
} else {
assert(ik->is_shared_unregistered_class(), "must be");
type = "unreg";
num_unregistered_klasses ++;
}
// Translate a dump-time address into an archive offset. The address may live
// either inside the output buffer or — during dynamic dump only — inside the
// mapped static archive.
uintx ArchiveBuilder::any_to_offset(address p) const {
  if (!is_in_mapped_static_archive(p)) {
    return buffer_to_offset(p);
  }
  assert(DynamicDumpSharedSpaces, "must be");
  return p - _mapped_static_archive_bottom;
}
// Update a Java object to point its Klass* to the address whene // the class would be mapped at runtime. void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
assert(DumpSharedSpaces, "sanity");
Klass* k = get_buffered_klass(o->klass());
Klass* requested_k = to_requested(k);
narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
o->set_narrow_klass(nk);
}
// RelocateBufferToRequested --- Relocate all the pointers in rw/ro, // so that the archive can be mapped to the "requested" location without runtime relocation. // // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested" // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions // - Every pointer must have one of the following values: // [a] NULL: // No relocation is needed. Remove this pointer from ptrmap so we don't need to // consider it at runtime. // [b] Points into an object X which is inside the buffer: // Adjust this pointer by _buffer_to_requested_delta, so it points to X // when the archive is mapped at the requested location. // [c] Points into an object Y which is inside mapped static archive: // - This happens only during dynamic dump // - Adjust this pointer by _mapped_to_requested_static_archive_delta, // so it points to Y when the static archive is mapped at the requested location. template <bool STATIC_DUMP> class RelocateBufferToRequested : public BitMapClosure {
ArchiveBuilder* _builder;
address _buffer_bottom;
intx _buffer_to_requested_delta;
intx _mapped_to_requested_static_archive_delta;
size_t _max_non_null_offset;
// Visit one marked pointer at the given bitmap offset and relocate it to its
// "requested" runtime address (see the class comment for cases [a]/[b]/[c]).
// Returns true so BitMap iteration continues over all marked bits.
//
// (The original extraction dropped the trailing "return true;" — falling off
// the end of a bool function is undefined behavior — and the function's
// closing brace; both are restored here.)
bool do_bit(size_t offset) {
  address* p = (address*)_buffer_bottom + offset;
  assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");

  if (*p == NULL) {
    // Case [a]: NULL needs no relocation; drop it from the runtime ptrmap.
    ArchivePtrMarker::ptrmap()->clear_bit(offset);
  } else {
    if (STATIC_DUMP) {
      // Case [b]: pointer into the buffer; shift it to the requested base.
      assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
      *p += _buffer_to_requested_delta;
      assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
    } else {
      if (_builder->is_in_buffer_space(*p)) {
        *p += _buffer_to_requested_delta;
        // assert is in requested dynamic archive
      } else {
        // Case [c]: pointer into the mapped static archive (dynamic dump only).
        assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
        *p += _mapped_to_requested_static_archive_delta;
        assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
      }
    }
    _max_non_null_offset = offset;
  }
  return true; // keep iterating
}
// Write detailed info to a mapfile to analyze contents of the archive. // static dump: // java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0 // dynamic dump: // java -cp MyApp.jar -XX:ArchiveClassesAtExit=MyApp.jsa \ // -Xlog:cds+map=trace:file=cds.map:none:filesize=0 MyApp // // We need to do some address translation because the buffers used at dump time may be mapped to // a different location at runtime. At dump time, the buffers may be at arbitrary locations // picked by the OS. At runtime, we try to map at a fixed location (SharedBaseAddress). For // consistency, we log everything using runtime addresses. class ArchiveBuilder::CDSMapLogger : AllStatic { static intx buffer_to_runtime_delta() { // Translate the buffers used by the RW/RO regions to their eventual (requested) locations // at runtime. return ArchiveBuilder::current()->buffer_to_requested_delta();
}
// Log information about a region, whose address at dump time is [base .. top). At // runtime, this region will be mapped to requested_base. requested_base is 0 if this // region will be mapped at os-selected addresses (such as the bitmap region), or will // be accessed with os::read (the header). // // Note: across -Xshare:dump runs, base may be different, but requested_base should // be the same as the archive contents should be deterministic. staticvoid log_region(constchar* name, address base, address top, address requested_base) {
size_t size = top - base;
base = requested_base;
top = requested_base + size;
log_info(cds, map)("[%-18s " PTR_FORMAT " - " PTR_FORMAT " " SIZE_FORMAT_W(9) " bytes]",
name, p2i(base), p2i(top), size);
}
#if INCLUDE_CDS_JAVA_HEAP
// Log the open and closed archive heap regions: one log_region line per
// region, followed by one line (plus a hex dump at trace level) per archived
// object, and a final line for any unused tail space.
//
// (Fixes extraction garbling: "staticvoid" -> "static void", "elseif" ->
// "else if", and the "break;" that had been swallowed by a line comment.)
static void log_heap_regions(const char* which, GrowableArray<MemRegion> *regions) {
  for (int i = 0; i < regions->length(); i++) {
    address start = address(regions->at(i).start());
    address end = address(regions->at(i).end());
    log_region(which, start, end, to_requested(start));

    while (start < end) {
      size_t byte_size;
      oop archived_oop = cast_to_oop(start);
      oop original_oop = HeapShared::get_original_object(archived_oop);
      if (original_oop != NULL) {
        ResourceMark rm;
        log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
                           p2i(to_requested(start)), original_oop->klass()->external_name());
        byte_size = original_oop->size() * BytesPerWord;
      } else if (archived_oop == HeapShared::roots()) {
        // HeapShared::roots() is copied specially so it doesn't exist in
        // HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
        log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
                           p2i(to_requested(start)));
        byte_size = objArrayOopDesc::object_size(HeapShared::roots()->length()) * BytesPerWord;
      } else {
        // We have reached the end of the region
        break;
      }
      address oop_end = start + byte_size;
      log_data(start, oop_end, to_requested(start), /*is_heap=*/true);
      start = oop_end;
    }
    if (start < end) {
      log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
                         p2i(to_requested(start)), size_t(end - start));
      log_data(start, end, to_requested(start), /*is_heap=*/true);
    }
  }
}
// Translate a dump-time heap address to its requested runtime address.
static address to_requested(address p) {
  return HeapShared::to_requested_address(p);
}
#endif
// Log all the data [base...top). Pretend that the base address // will be mapped to requested_base at run-time. staticvoid log_data(address base, address top, address requested_base, bool is_heap = false) {
assert(top >= base, "must be");
LogStreamHandle(Trace, cds, map) lsh; if (lsh.is_enabled()) { int unitsize = sizeof(address); if (is_heap && UseCompressedOops) { // This makes the compressed oop pointers easier to read, but // longs and doubles will be split into two words.
unitsize = sizeof(narrowOop);
}
os::print_hex_dump(&lsh, base, top, unitsize, 32, requested_base);
}
}
mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
mapinfo->set_header_crc(mapinfo->compute_header_crc()); // After this point, we should not write any data into mapinfo->header() since this // would corrupt its checksum we have calculated before.
mapinfo->write_header();
mapinfo->close();
log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
total_bytes, total_reserved, total_u_perc);
}
// Log the bitmap region's size and its share of the total archive size.
// The bitmap region is always fully used, so "out of" reports its own size.
void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
  double percent_of_total = size/double(total_size)*100.0;
  log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
                 size, percent_of_total, size);
}
void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* regions, constchar *name, size_t total_size) { int arr_len = regions == NULL ? 0 : regions->length(); for (int i = 0; i < arr_len; i++) { char* start = (char*)regions->at(i).start();
size_t size = regions->at(i).byte_size(); char* top = start + size;
log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
name, i, size, size/double(total_size)*100.0, size, p2i(start));
}
}
void ArchiveBuilder::report_out_of_space(constchar* name, size_t needed_bytes) { // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes // or so.
_rw_region.print_out_of_space_msg(name, needed_bytes);
_ro_region.print_out_of_space_msg(name, needed_bytes);
vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name), "Please reduce the number of shared classes.");
}
#ifndef PRODUCT void ArchiveBuilder::assert_is_vm_thread() {
assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
} #endif
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.