/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they like other stats are printed to the log only.)
#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
#if INCLUDE_JVMCI
  int speculations_size;
  int jvmci_data_size;
#endif
  int oops_size;
  int metadata_size;
void note_nmethod(nmethod* nm) {
nmethod_count += 1;
total_size += nm->size();
relocation_size += nm->relocation_size();
consts_size += nm->consts_size();
insts_size += nm->insts_size();
stub_size += nm->stub_size();
oops_size += nm->oops_size();
metadata_size += nm->metadata_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
    nul_chk_table_size += nm->nul_chk_table_size();
#if INCLUDE_JVMCI
speculations_size += nm->speculations_size();
    jvmci_data_size += nm->jvmci_data_size();
#endif
  }

  void print_nmethod_stats(const char* name) {
    if (nmethod_count == 0)  return;
tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name); if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod)); if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); if (metadata_size != 0) tty->print_cr(" metadata = %d", metadata_size); if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size); if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size); if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size); if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size); if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size); #if INCLUDE_JVMCI if (speculations_size != 0) tty->print_cr(" speculations = %d", speculations_size); if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %d", jvmci_data_size); #endif
}
};
struct native_nmethod_stats_struct {
  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  int native_metadata_size;

  void note_native_nmethod(nmethod* nm) {
native_nmethod_count += 1;
native_total_size += nm->size();
native_relocation_size += nm->relocation_size();
native_insts_size += nm->insts_size();
native_oops_size += nm->oops_size();
native_metadata_size += nm->metadata_size();
  }

  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count); if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size); if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size); if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size); if (native_oops_size != 0) tty->print_cr(" N. oops = %d", native_oops_size); if (native_metadata_size != 0) tty->print_cr(" N. metadata = %d", native_metadata_size);
}
};
struct pc_nmethod_stats_struct {
  int pc_desc_resets;    // number of resets (= number of caches)
  int pc_desc_queries;   // queries to nmethod::find_pc_desc
  int pc_desc_approx;    // number of those which have approximate true
  int pc_desc_repeats;   // number of _pc_descs[0] hits
  int pc_desc_hits;      // number of LRU cache hits
  int pc_desc_tests;     // total number of PcDesc examinations
  int pc_desc_searches;  // total number of quasi-binary search steps
  int pc_desc_adds;      // number of LRU cache insertions
ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
assert(pc != NULL, "Must be non null");
assert(exception.not_null(), "Must be non null");
assert(handler != NULL, "Must be non null");
address ExceptionCache::match(Handle exception, address pc) {
assert(pc != NULL,"Must be non null");
assert(exception.not_null(),"Must be non null"); if (exception->klass() == exception_type()) { return (test_address(pc));
}
return NULL;
}
bool ExceptionCache::match_exception_with_space(Handle exception) {
assert(exception.not_null(),"Must be non null"); if (exception->klass() == exception_type() && count() < cache_size) { returntrue;
} returnfalse;
}
address ExceptionCache::test_address(address addr) {
  int limit = count();
  for (int i = 0; i < limit; i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
}
  }
  return NULL;
}
// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
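// In other words: an exact match requires the PcDesc's offset to equal pc_offset,
// while an approximate match accepts the first PcDesc whose offset is at or beyond
// pc_offset (its predecessor must end strictly before pc_offset).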
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL;  // native method; no PcDescs at all
    return;
}
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
_pc_descs[i] = initial_pc_desc;
}
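// Because the cache is pre-filled with the sentinel PcDesc (whose pc_offset() is
// negative), the lookup below can treat a negative pc_offset as an empty slot.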
// Note: one might think that caching the most recently
// read value separately would be a win, but one would be
// wrong. When many threads are updating it, the cache
// line it's in would bounce between caches, negating
// any benefit.

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
PcDesc* res;
// Step one: Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
    return res;
}
  // Step two: Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
      return res;
}
}
  // Report failure.
  return NULL;
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++) {
PcDesc* next = _pc_descs[i];
_pc_descs[i] = pc_desc;
pc_desc = next;
}
}
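// Illustration: the loop above stores pc_desc at slot 0 and shifts the previous
// entries down by one, so the oldest entry falls off the end (simple LRU eviction).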
// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = align_up(pcs_size, oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
nsize = pcs_size + sizeof(PcDesc);
}
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
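// Worked example (hypothetical sizes): if oopSize were 8 and sizeof(PcDesc) were 12,
// then for pcs_size = 36 (already a multiple of sizeof(PcDesc)): align_up(36, 8) = 40,
// which is not a multiple of 12, so nsize becomes 36 + 12 = 48, a multiple of both.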
const char* nmethod::compile_kind() const {
  if (is_osr_method())  return "osr";
  if (method() != NULL && is_native_method()) {
    if (method()->is_continuation_native_intrinsic()) {
      return "cnt";
} return"c2n";
  }
  return NULL;
}
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = not_installed;
_has_flushed_dependencies = 0;
_load_reported = false; // jvmti state
CheckForOopsClosure cfo;
nm->oops_do(&cfo);
assert(!cfo.found_oop(), "no oops allowed");
  // We allow an exception for the nmethod's own Method, but require its class to be permanent.
Method* own_method = nm->method();
CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
nm->metadata_do(&cfm);
assert(!cfm.found_metadata(), "no metadata allowed");
assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(), "Method's class needs to be permanent");
}
#endif
nmethod* nmethod::new_native_nmethod(const methodHandle& method, int compile_id,
CodeBuffer *code_buffer, int vep_offset, int frame_complete, int frame_size,
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps, int exception_handler) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (nm != NULL) {
#if INCLUDE_JVMCI
      if (compiler->is_jvmci()) {
        // Initialize the JVMCINMethodData object inlined into nm
nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
      }
#endif
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes. The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled. For applications with a lot
      // of classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on per-CallSite instance basis.
oop call_site = deps.argument_oop(0);
MethodHandles::add_dependent_nmethod(call_site, nm);
} else {
          Klass* klass = deps.context_type();
          if (klass == NULL) {
            continue;  // ignore things like evol_method
          }
          // record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
}
NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm));
}
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
DEBUG_ONLY(nm->verify();)
nm->log_new_nmethod();
  }
  return nm;
}
// For native wrappers
nmethod::nmethod(
Method* method,
CompilerType type, int nmethod_size, int compile_id,
CodeOffsets* offsets,
CodeBuffer* code_buffer, int frame_size,
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
_unlinked_next(NULL),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset),
_is_unloading_state(0)
{
  {
    int scopes_data_offset   = 0;
    int deoptimize_offset    = 0;
    int deoptimize_mh_offset = 0;
init_defaults();
_comp_level = CompLevel_none;
    _entry_bci = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0;
_orig_pc_offset = 0;
_gc_epoch = CodeCache::gc_epoch();
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
#if INCLUDE_JVMCI
    // Copy speculations to nmethod
    if (speculations_size() != 0) {
memcpy(speculations_begin(), speculations, speculations_len);
    }
#endif
    // we use the information of entry points to find out if a method is
    // static or non static
assert(compiler->is_c2() || compiler->is_jvmci() ||
_method->is_static() == (entry_point() == _verified_entry_point), " entry points must be same for static methods and vice versa");
}
}
// Print a short set of xml attributes to identify this nmethod. The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
log->print(" compile_id='%d'", compile_id()); constchar* nm_kind = compile_kind(); if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
log->print(" compiler='%s'", compiler_name()); if (TieredCompilation) {
log->print(" level='%d'", comp_level());
  }
#if INCLUDE_JVMCI
  if (jvmci_nmethod_data() != NULL) {
    const char* jvmci_name = jvmci_nmethod_data()->name();
    if (jvmci_name != NULL) {
log->print(" jvmci_mirror_name='");
log->text("%s", jvmci_name);
log->print("'");
}
  }
#endif
}
if (xtty != NULL) {
xtty->tail("print_nmethod");
}
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
*(void**)dest = handle;
} else {
*dest = JNIHandles::resolve_non_null(handle);
}
}
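// In short: NULL and the non-oop sentinel are copied bit-for-bit, while any real
// JNI handle is resolved to the oop it refers to before being embedded.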
// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
  // Now we can fix up all the oops in the code. We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
dest[index] = array->at(index);
}
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
oop* dest = reloc->oop_addr();
initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
      }
      // Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation* reloc = iter.metadata_reloc();
reloc->fix_metadata_relocation();
}
}
}
  // Make sure that post call nops fill in nmethod offsets eagerly so
  // we don't have to race with deoptimization
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::post_call_nop_type) {
post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
address pc = reloc->addr();
install_post_call_nop_displacement(this, pc);
}
}
}
void nmethod::make_deoptimized() {
  if (!Continuations::enabled()) {
    return;
}
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
address pc = ic->end_of_call();
        NativePostCallNop* nop = nativePostCallNop_at(pc);
        if (nop != NULL) {
nop->make_deopt();
}
        assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
        break;
      }
      case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
address pc = csc->end_of_call();
        NativePostCallNop* nop = nativePostCallNop_at(pc);
        //tty->print_cr(" - static pc %p", pc);
        if (nop != NULL) {
nop->make_deopt();
        }
        // We can't assert here, there are some calls to stubs / runtime
        // that have reloc data but don't have a post call NOP.
        //assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
        break;
      }
      default:
        break;
}
  }
  // Don't deopt this again.
mark_deoptimized();
}
bool nmethod::is_maybe_on_stack() {
  // If the condition below is true, it means that the nmethod was found to
  // be alive during the previous completed marking cycle.
  return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
}
void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here. See comments in methodData.hpp.
mdo->inc_decompile_count();
}
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL) {
method()->method_holder()->remove_osr_nmethod(this);
}
}
void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
os::current_thread_id());
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
}
}
  CompileTask::print_ul(this, "made not entrant");
  if (PrintCompilation) {
print_on(tty, "made not entrant");
}
}
// Invalidate code
bool nmethod::make_not_entrant() {
  // This can be called while the system is already at a safepoint which is ok
NoSafepointVerifier nsv;
  if (is_unloading()) {
    // If the nmethod is unloading, then it is already not entrant through
    // the nmethod entry barriers. No need to do anything; GC will unload it.
    return false;
}
  if (Atomic::load(&_state) == not_entrant) {
    // Avoid taking the lock if already in required state.
    // This is safe from races because the state is an end-state,
    // which the nmethod cannot back out of once entered.
    // No need for fencing either.
    return false;
}
  {
    // Enter critical section. Does not block for safepoint.
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
    if (Atomic::load(&_state) == not_entrant) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
}
    if (is_osr_method()) {
      // This logic is equivalent to the logic below for patching the
      // verified entry point of regular methods.
      // this effectively makes the osr nmethod not entrant
invalidate_osr_method();
    } else {
      // The caller can be calling the method statically or through an inline
      // cache call.
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
}
    if (update_recompile_counts()) {
      // Mark the method as decompiled.
inc_decompile_count();
}
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
      // If nmethod entry barriers are not supported, we won't mark
      // nmethods as on-stack when they become on-stack. So we
      // degrade to a less accurate flushing strategy, for now.
mark_as_maybe_on_stack();
}
// Remove nmethod from method.
unlink_from_method();
} // leave critical region under CompiledMethod_lock
#if INCLUDE_JVMCI
  // Invalidate can't occur while holding the Patching lock
  JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
  if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
  }
#endif
#ifdef ASSERT
  if (is_osr_method() && method() != NULL) {
    // Make sure osr nmethod is invalidated, i.e. not on the list
    bool found = method()->method_holder()->remove_osr_nmethod(this);
assert(!found, "osr nmethod should have been invalidated");
  }
#endif
  return true;
}
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
  if (_unlinked_next != NULL) {
    // Already unlinked. It can be invoked twice because concurrent code cache
    // unloading might need to restart when inline cache cleaning fails due to
    // running out of ICStubs, which can only be refilled at safepoints
    return;
}
flush_dependencies();
  // unlink_from_method will take the CompiledMethod_lock.
  // In this case we don't strictly need it when unlinking nmethods from
  // the Method, because it is only concurrently unlinked by
  // the entry barrier, which acquires the per nmethod lock.
unlink_from_method();
clear_ic_callsites();
if (is_osr_method()) {
invalidate_osr_method();
}
#if INCLUDE_JVMCI
  // Clear the link between this nmethod and a HotSpotNmethod mirror
  JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
  if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
  }
#endif
// Post before flushing as jmethodID is being used
post_compiled_method_unload();
  // Register for flushing when it is safe. For concurrent class unloading,
  // that would be after the unloading handshake, and for STW class unloading
  // that would be when getting back to the VM thread.
CodeCache::register_unlinked(this);
}
  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
ec = next;
}
//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent.
void nmethod::flush_dependencies() {
  if (!has_flushed_dependencies()) {
    set_has_flushed_dependencies();
    for (Dependencies::DepStream deps(this); deps.next(); ) {
      if (deps.type() == Dependencies::call_site_target_value) {
        // CallSite dependencies are managed on per-CallSite instance basis.
oop call_site = deps.argument_oop(0);
MethodHandles::clean_dependency_context(call_site);
} else {
        Klass* klass = deps.context_type();
        if (klass == NULL) {
          continue;  // ignore things like evol_method
        }
        // During GC, the liveness of the dependee determines which class needs to be updated.
        // The GC may clean dependency contexts concurrently and in parallel.
InstanceKlass::cast(klass)->clean_dependency_context();
}
}
}
}
// ------------------------------------------------------------------
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
  // This is a bad time for a safepoint. We don't want
  // this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
  if (JvmtiExport::should_post_compiled_method_load()) {
    // Only post unload events if load events are found.
    set_load_reported();
    // If a JavaThread hasn't been passed in, let the Service thread
    // (which is a real Java thread) post the event
    JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
    if (state == NULL) {
      // Execute any barrier code for this nmethod as if it's called, since
      // keeping it alive looks like stack walking.
run_nmethod_entry_barrier();
ServiceThread::enqueue_deferred_event(&event);
    } else {
      // This enters the nmethod barrier outside in the caller.
state->enqueue_event(&event);
}
}
}
// If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. The Method* will not be valid when this is freed.
  // Don't bother posting the unload if the load event wasn't posted.
  if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(
method()->jmethod_id(), insts_begin());
ServiceThread::enqueue_deferred_event(&event);
}
}
// Iterate over metadata calling this function. Used by RedefineClasses
void nmethod::metadata_do(MetadataClosure* f) {
  {
    // Visit all immediate references that are embedded in the instruction stream.
    RelocIterator iter(this, oops_reloc_begin());
    while (iter.next()) {
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation* r = iter.metadata_reloc();
        // In this metadata, we must only follow those metadatas directly embedded in
        // the code. Other metadatas (oop_index>0) are seen as part of
        // the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
               "metadata must be found in exactly one place");
        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
          Metadata* md = r->metadata_value();
          if (md != _method) f->do_metadata(md);
}
      } else if (iter.type() == relocInfo::virtual_call_type) {
        // Check compiledIC holders associated with this nmethod
ResourceMark rm;
        CompiledIC *ic = CompiledIC_at(&iter);
        if (ic->is_icholder_call()) {
CompiledICHolder* cichk = ic->cached_icholder();
f->do_metadata(cichk->holder_metadata());
f->do_metadata(cichk->holder_klass());
} else {
          Metadata* ic_oop = ic->cached_metadata();
          if (ic_oop != NULL) {
f->do_metadata(ic_oop);
}
}
}
}
}
  // Visit the metadata section
  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
Metadata* md = *p;
f->do_metadata(md);
}
  // Visit metadata not embedded in the other places.
  if (_method != NULL) f->do_metadata(_method);
}
// Heuristic for nuking nmethods even though their oops are live.
// Main purpose is to reduce code cache pressure and get rid of
// nmethods that don't seem to be all that relevant any longer.
bool nmethod::is_cold() {
  if (!MethodFlushing || is_native_method() || is_not_installed()) {
    // No heuristic unloading at all
    return false;
}
  if (!is_maybe_on_stack() && is_not_entrant()) {
    // Not entrant nmethods that are not on any stack can just
    // be removed
    return true;
}
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
    // On platforms that don't support nmethod entry barriers, we can't
    // trust the temporal aspect of the gc epochs. So we can't detect
    // cold nmethods on such platforms.
    return false;
}
  if (!UseCodeCacheFlushing) {
    // Bail out if we don't heuristically remove nmethods
    return false;
}
  // Other code can be phased out more gradually after N GCs
  return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
}
// The _is_unloading_state encodes a tuple comprising the unloading cycle
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
// This is the bit layout of the _is_unloading_state byte: 00000CCU
// CC refers to the cycle, which has 2 bits, and U refers to the result of
// IsUnloadingBehaviour::is_unloading() for that unloading cycle.
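// For example, with the layout above, an unloading cycle of 2 (CC = 10) and an
// is_unloading() result of true (U = 1) would encode as 0b101, i.e. the byte value 5.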
  // The IsUnloadingBehaviour is responsible for calculating if the nmethod
  // should be unloaded. This can be either because there is a dead oop,
  // or because is_cold() heuristically determines it is time to unload.
state_unloading_cycle = current_cycle;
state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
  // Note that if an nmethod has dead oops, everyone will agree that the
  // nmethod is_unloading. However, the is_cold heuristics can yield
  // different outcomes, so we guard the computed result with a CAS
  // to ensure all threads have a shared view of whether an nmethod
  // is_unloading or not.
uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
  if (found_state == state) {
    // First to change state, we win
    return state_is_unloading;
  } else {
    // State already set, so use it
    return IsUnloadingState::is_unloading(found_state);
}
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops or is heuristically found to be not important.
void nmethod::do_unloading(bool unloading_occurred) {
  // Make sure the oop's ready to receive visitors
  if (is_unloading()) {
unlink();
} else {
guarantee(unload_nmethod_caches(unloading_occurred), "Should not need transition stubs");
    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
bs_nm->disarm(this);
}
}
}
void nmethod::oops_do(OopClosure* f, bool allow_dead) {
  // Prevent extra code cache walk for platforms that don't have immediate oops.
  if (relocInfo::mustIterateImmediateOopsInCode()) {
RelocIterator iter(this, oops_reloc_begin());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation* r = iter.oop_reloc();
        // In this loop, we must only follow those oops directly embedded in
        // the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
               "oop must be found in exactly one place");
        if (r->oop_is_immediate() && r->oop_value() != NULL) {
f->do_oop(r->oop_addr());
}
}
}
}
  // Scopes
  // This includes oop constants not inlined in the code stream.
  for (oop* p = oops_begin(); p < oops_end(); p++) {
    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
f->do_oop(p);
}
}
void nmethod::follow_nmethod(OopIterateClosure* cl) {
  // Process oops in the nmethod
oops_do(cl);
// CodeCache unloading support
mark_as_maybe_on_stack();
bool nmethod::oops_do_try_claim() {
  if (oops_do_try_claim_weak_request()) {
nmethod* result = oops_do_try_add_to_list_as_weak_done();
assert(result == NULL, "adding to global list as weak done must always succeed."); returntrue;
  }
  return false;
}
bool nmethod::oops_do_try_claim_weak_request() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");