/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif
#ifdef DTRACE_ENABLED
// Only bother with this argument setup if dtrace is available
#define DTRACE_METHOD_UNLOAD_PROBE(method) \
{ \
Method* m = (method); \
if (m != NULL) { \
Symbol* klass_name = m->klass_name(); \
Symbol* name = m->name(); \
Symbol* signature = m->signature(); \
HOTSPOT_COMPILED_METHOD_UNLOAD( \
(char *) klass_name->bytes(), klass_name->utf8_length(), \
(char *) name->bytes(), name->utf8_length(), \
(char *) signature->bytes(), signature->utf8_length()); \
} \
}
#else // ndef DTRACE_ENABLED
#define DTRACE_METHOD_UNLOAD_PROBE(method)
#endif
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
// PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)
#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
int nmethod_count;
int total_size;
int relocation_size;
int consts_size;
int insts_size;
int stub_size;
int scopes_data_size;
int scopes_pcs_size;
int dependencies_size;
int handler_table_size;
int nul_chk_table_size;
#if INCLUDE_JVMCI
int speculations_size;
int jvmci_data_size;
#endif
int oops_size;
int metadata_size;
void note_nmethod(nmethod* nm) {
nmethod_count += 1;
total_size += nm->size();
relocation_size += nm->relocation_size();
consts_size += nm->consts_size();
insts_size += nm->insts_size();
stub_size += nm->stub_size();
oops_size += nm->oops_size();
metadata_size += nm->metadata_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size();
#if INCLUDE_JVMCI
speculations_size += nm->speculations_size();
jvmci_data_size += nm->jvmci_data_size();
#endif
}
void print_nmethod_stats(const char* name) {
if (nmethod_count == 0) return;
tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
if (total_size != 0) tty->print_cr(" total in heap = %d", total_size);
if (nmethod_count != 0) tty->print_cr(" header = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size);
if (consts_size != 0) tty->print_cr(" constants = %d", consts_size);
if (insts_size != 0) tty->print_cr(" main code = %d", insts_size);
if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size);
if (oops_size != 0) tty->print_cr(" oops = %d", oops_size);
if (metadata_size != 0) tty->print_cr(" metadata = %d", metadata_size);
if (scopes_data_size != 0) tty->print_cr(" scopes data = %d", scopes_data_size);
if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %d", scopes_pcs_size);
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
#if INCLUDE_JVMCI
if (speculations_size != 0) tty->print_cr(" speculations = %d", speculations_size);
if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %d", jvmci_data_size);
#endif
}
};
struct native_nmethod_stats_struct {
int native_nmethod_count;
int native_total_size;
int native_relocation_size;
int native_insts_size;
int native_oops_size;
int native_metadata_size;
void note_native_nmethod(nmethod* nm) {
native_nmethod_count += 1;
native_total_size += nm->size();
native_relocation_size += nm->relocation_size();
native_insts_size += nm->insts_size();
native_oops_size += nm->oops_size();
native_metadata_size += nm->metadata_size();
}
void print_native_nmethod_stats() {
if (native_nmethod_count == 0) return;
tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
if (native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size);
if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size);
if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size);
if (native_oops_size != 0) tty->print_cr(" N. oops = %d", native_oops_size);
if (native_metadata_size != 0) tty->print_cr(" N. metadata = %d", native_metadata_size);
}
};
struct pc_nmethod_stats_struct {
int pc_desc_resets; // number of resets (= number of caches)
int pc_desc_queries; // queries to nmethod::find_pc_desc
int pc_desc_approx; // number of queries with the approximate flag set
int pc_desc_repeats; // number of _pc_descs[0] hits
int pc_desc_hits; // number of LRU cache hits
int pc_desc_tests; // total number of PcDesc examinations
int pc_desc_searches; // total number of quasi-binary search steps
int pc_desc_adds; // number of LRU cache insertions
void print_pc_stats() {
tty->print_cr("PcDesc Statistics: %d queries, %.2f comparisons per query",
pc_desc_queries,
(double)(pc_desc_tests + pc_desc_searches)
/ pc_desc_queries);
tty->print_cr(" caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
pc_desc_resets,
pc_desc_queries, pc_desc_approx,
pc_desc_repeats, pc_desc_hits,
pc_desc_tests, pc_desc_searches, pc_desc_adds);
}
};
#ifdef COMPILER1
static java_nmethod_stats_struct c1_java_nmethod_stats;
#endif
#ifdef COMPILER2
static java_nmethod_stats_struct c2_java_nmethod_stats;
#endif
#if INCLUDE_JVMCI
static java_nmethod_stats_struct jvmci_java_nmethod_stats;
#endif
static java_nmethod_stats_struct unknown_java_nmethod_stats;
static native_nmethod_stats_struct native_nmethod_stats;
static pc_nmethod_stats_struct pc_nmethod_stats;
static void note_java_nmethod(nmethod* nm) {
#ifdef COMPILER1
if (nm->is_compiled_by_c1()) {
c1_java_nmethod_stats.note_nmethod(nm);
} else
#endif
#ifdef COMPILER2
if (nm->is_compiled_by_c2()) {
c2_java_nmethod_stats.note_nmethod(nm);
} else
#endif
#if INCLUDE_JVMCI
if (nm->is_compiled_by_jvmci()) {
jvmci_java_nmethod_stats.note_nmethod(nm);
} else
#endif
{
unknown_java_nmethod_stats.note_nmethod(nm);
}
}
#endif // !PRODUCT
//---------------------------------------------------------------------------------
ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
assert(pc != NULL, "Must be non null");
assert(exception.not_null(), "Must be non null");
assert(handler != NULL, "Must be non null");
_count = 0;
_exception_type = exception->klass();
_next = NULL;
_purge_list_next = NULL;
add_address_and_handler(pc,handler);
}
address ExceptionCache::match(Handle exception, address pc) {
assert(pc != NULL,"Must be non null");
assert(exception.not_null(),"Must be non null");
if (exception->klass() == exception_type()) {
return (test_address(pc));
}
return NULL;
}
bool ExceptionCache::match_exception_with_space(Handle exception) {
assert(exception.not_null(),"Must be non null");
if (exception->klass() == exception_type() && count() < cache_size) {
return true;
}
return false;
}
address ExceptionCache::test_address(address addr) {
int limit = count();
for (int i = 0; i < limit; i++) {
if (pc_at(i) == addr) {
return handler_at(i);
}
}
return NULL;
}
bool ExceptionCache::add_address_and_handler(address addr, address handler) {
if (test_address(addr) == handler) return true;
int index = count();
if (index < cache_size) {
set_pc_at(index, addr);
set_handler_at(index, handler);
increment_count();
return true;
}
return false;
}
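// Illustrative sketch, not upstream code: an ExceptionCache node caches
// (pc, handler) pairs for a single exception type, up to the declared
// cache_size. Assuming a fresh node seeded by the constructor:
//
//   ExceptionCache ec(exception, pc1, handler1); // count() == 1
//   ec.match(exception, pc1);                    // -> handler1 (type and pc hit)
//   ec.match(exception, pc2);                    // -> NULL (pc miss)
//   ec.add_address_and_handler(pc2, handler2);   // true while count() < cache_size
//   ec.add_address_and_handler(pc2, handler2);   // true, entry already present
//
// Once the node is full, add_address_and_handler returns false and the
// caller is expected to chain a new node through set_next.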
ExceptionCache* ExceptionCache::next() {
return Atomic::load(&_next);
}
void ExceptionCache::set_next(ExceptionCache *ec) {
Atomic::store(&_next, ec);
}
//-----------------------------------------------------------------------------
// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
if (!approximate)
return pc->pc_offset() == pc_offset;
else
return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
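// Worked example, for illustration: suppose consecutive PcDescs carry
// pc_offsets { -1 (sentinel), 8, 24 }. Then:
//
//   match_desc(&desc_24, 16, false)  // exact:  24 != 16            -> false
//   match_desc(&desc_8,  16, true)   // approx: -1 < 16, but 16 > 8 -> false
//   match_desc(&desc_24, 16, true)   // approx:  8 < 16 && 16 <= 24 -> true
//
// i.e. an approximate query matches the descriptor covering the half-open
// range ((pc-1)->pc_offset(), pc->pc_offset()].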
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
if (initial_pc_desc == NULL) {
_pc_descs[0] = NULL; // native method; no PcDescs at all
return;
}
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
// reset the cache by filling it with benign (non-null) values
assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
for (int i = 0; i < cache_size; i++)
_pc_descs[i] = initial_pc_desc;
}
PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);
// Note: one might think that caching the most recently
// read value separately would be a win, but one would be
// wrong. When many threads are updating it, the cache
// line it's in would bounce between caches, negating
// any benefit.
// In order to prevent race conditions, do not load cache elements
// repeatedly, but use a local copy:
PcDesc* res;
// Step one: Check the most recently added value.
res = _pc_descs[0];
if (res == NULL) return NULL; // native method; no PcDescs at all
if (match_desc(res, pc_offset, approximate)) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
return res;
}
// Step two: Check the rest of the LRU cache.
for (int i = 1; i < cache_size; ++i) {
res = _pc_descs[i];
if (res->pc_offset() < 0) break; // optimization: skip empty cache
if (match_desc(res, pc_offset, approximate)) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
return res;
}
}
// Report failure.
return NULL;
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
// Update the LRU cache by shifting pc_desc forward.
for (int i = 0; i < cache_size; i++) {
PcDesc* next = _pc_descs[i];
_pc_descs[i] = pc_desc;
pc_desc = next;
}
}
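// Illustrative trace, assuming the small fixed cache_size (e.g. 4 slots):
// every add shifts the existing entries down one slot, keeping MRU order
// and dropping the oldest entry off the end:
//
//   after reset_to: [S, S, S, S]   // S = benign sentinel
//   add(A):         [A, S, S, S]
//   add(B):         [B, A, S, S]
//   add(C), add(D), add(E):
//                   [E, D, C, B]   // A has been evicted
//
// find_pc_desc above probes _pc_descs[0] first (the pc_desc_repeats fast
// path) before scanning the remaining slots.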
// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
int nsize = align_up(pcs_size, oopSize);
if ((nsize % sizeof(PcDesc)) != 0) {
nsize = pcs_size + sizeof(PcDesc);
}
assert((nsize % oopSize) == 0, "correct alignment");
return nsize;
}
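// Worked example with hypothetical sizes: if sizeof(PcDesc) were 12 and
// oopSize 8, then for pcs_size = 36 (three PcDescs):
//
//   nsize = align_up(36, 8) = 40;   // oop-aligned, but 40 % 12 != 0
//   nsize = 36 + 12 = 48;           // 48 % 8 == 0 and 48 % 12 == 0
//
// Adding one PcDesc works because pcs_size is always a whole number of
// PcDescs and, by the assumption above, 2*sizeof(PcDesc) is a multiple of
// oopSize, so an even count of PcDescs satisfies both alignments.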
int nmethod::total_size() const {
return
consts_size() +
insts_size() +
stub_size() +
scopes_data_size() +
scopes_pcs_size() +
handler_table_size() +
nul_chk_table_size();
}
const char* nmethod::compile_kind() const {
if (is_osr_method()) return "osr";
if (method() != NULL && is_native_method()) {
if (method()->is_continuation_native_intrinsic()) {
return "cnt";
}
return "c2n";
}
return NULL;
}
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = not_installed;
_has_flushed_dependencies = 0;
_load_reported = false; // jvmti state
_oops_do_mark_link = NULL;
_osr_link = NULL;
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
}
#ifdef ASSERT
class CheckForOopsClosure : public OopClosure {
bool _found_oop = false;
public:
virtual void do_oop(oop* o) { _found_oop = true; }
virtual void do_oop(narrowOop* o) { _found_oop = true; }
bool found_oop() { return _found_oop; }
};
class CheckForMetadataClosure : public MetadataClosure {
bool _found_metadata = false;
Metadata* _ignore = nullptr;
public:
CheckForMetadataClosure(Metadata* ignore) : _ignore(ignore) {}
virtual void do_metadata(Metadata* md) { if (md != _ignore) _found_metadata = true; }
bool found_metadata() { return _found_metadata; }
};
static void assert_no_oops_or_metadata(nmethod* nm) {
if (nm == nullptr) return;
assert(nm->oop_maps() == nullptr, "expectation");
CheckForOopsClosure cfo;
nm->oops_do(&cfo);
assert(!cfo.found_oop(), "no oops allowed");
// We allow an exception for the nmethod's own Method, but require its class to be permanent.
Method* own_method = nm->method();
CheckForMetadataClosure cfm(/* ignore reference to own Method */ own_method);
nm->metadata_do(&cfm);
assert(!cfm.found_metadata(), "no metadata allowed");
assert(own_method->method_holder()->class_loader_data()->is_permanent_class_loader_data(),
"Method's class needs to be permanent");
}
#endif
nmethod* nmethod::new_native_nmethod(const methodHandle& method,
int compile_id,
CodeBuffer *code_buffer,
int vep_offset,
int frame_complete,
int frame_size,
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps,
int exception_handler) {
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
if (exception_handler != -1) {
offsets.set_value(CodeOffsets::Exceptions, exception_handler);
}
// MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
// IsUnloadingBehaviour::is_unloading needs to handle them separately.
bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
nm = new (native_nmethod_size, allow_NonNMethod_space)
nmethod(method(), compiler_none, native_nmethod_size,
compile_id, &offsets,
code_buffer, frame_size,
basic_lock_owner_sp_offset,
basic_lock_sp_offset,
oop_maps);
DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
NOT_PRODUCT(if (nm != NULL) native_nmethod_stats.note_native_nmethod(nm));
}
if (nm != NULL) {
// verify nmethod
debug_only(nm->verify();) // might block
nm->log_new_nmethod();
}
return nm;
}
nmethod* nmethod::new_nmethod(const methodHandle& method,
int compile_id,
int entry_bci,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder* debug_info,
Dependencies* dependencies,
CodeBuffer* code_buffer, int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
int nmethod_mirror_index,
const char* nmethod_mirror_name,
FailedSpeculation** failed_speculations
#endif
)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
#if INCLUDE_JVMCI
int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name);
#endif
int nmethod_size =
CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
+ adjust_pcs_size(debug_info->pcs_size())
+ align_up((int)dependencies->size_in_bytes(), oopSize)
+ align_up(handler_table->size_in_bytes() , oopSize)
+ align_up(nul_chk_table->size_in_bytes() , oopSize)
#if INCLUDE_JVMCI
+ align_up(speculations_len , oopSize)
+ align_up(jvmci_data_size , oopSize)
#endif
+ align_up(debug_info->data_size() , oopSize);
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm = new (nmethod_size, comp_level)
nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
oop_maps,
handler_table,
nul_chk_table,
compiler,
comp_level
#if INCLUDE_JVMCI
, speculations,
speculations_len,
jvmci_data_size
#endif
);
if (nm != NULL) {
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
// Initialize the JVMCINMethodData object inlined into nm
nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
}
#endif
// To make dependency checking during class loading fast, record
// the nmethod dependencies in the classes it is dependent on.
// This allows the dependency checking code to simply walk the
// class hierarchy above the loaded class, checking only nmethods
// which are dependent on those classes. The slow way is to
// check every nmethod for dependencies which makes it linear in
// the number of methods compiled. For applications with a lot of
// classes the slow way is too slow.
for (Dependencies::DepStream deps(nm); deps.next(); ) {
if (deps.type() == Dependencies::call_site_target_value) {
// CallSite dependencies are managed on per-CallSite instance basis.
oop call_site = deps.argument_oop(0);
MethodHandles::add_dependent_nmethod(call_site, nm);
} else {
Klass* klass = deps.context_type();
if (klass == NULL) {
continue; // ignore things like evol_method
}
// record this nmethod as dependent on this klass
InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
}
}
NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm));
}
}
// Do verification and logging outside CodeCache_lock.
if (nm != NULL) {
// Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
DEBUG_ONLY(nm->verify();)
nm->log_new_nmethod();
}
return nm;
}
// For native wrappers
nmethod::nmethod(
Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
CodeOffsets* offsets,
CodeBuffer* code_buffer,
int frame_size,
ByteSize basic_lock_owner_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps )
: CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
_unlinked_next(NULL),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
_native_basic_lock_sp_offset(basic_lock_sp_offset),
_is_unloading_state(0)
{
{
int scopes_data_offset = 0;
int deoptimize_offset = 0;
int deoptimize_mh_offset = 0;
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults();
_comp_level = CompLevel_none;
_entry_bci = InvocationEntryBci;
// We have no exception handler or deopt handler; make the
// values something that will never match a pc, like the nmethod vtable entry.
_exception_offset = 0;
_orig_pc_offset = 0;
_gc_epoch = CodeCache::gc_epoch();
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
_oops_offset = data_offset();
_metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
_nul_chk_table_offset = _handler_table_offset;
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset;
_jvmci_data_offset = _speculations_offset;
_nmethod_end_offset = _jvmci_data_offset;
#else
_nmethod_end_offset = _nul_chk_table_offset;
#endif
_compile_id = compile_id;
_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
_osr_entry_point = NULL;
_exception_cache = NULL;
_pc_desc_container.reset_to(NULL);
_exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
_scopes_data_begin = (address) this + scopes_data_offset;
_deopt_handler_begin = (address) this + deoptimize_offset;
_deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;
code_buffer->copy_code_and_locs_to(this);
code_buffer->copy_values_to(this);
clear_unloading_state();
Universe::heap()->register_nmethod(this);
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
finalize_relocations();
}
if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
ttyLocker ttyl; // keep the following output all in one block
// This output goes directly to the tty, not the compiler log.
// To enable tools to match it up with the compilation activity,
// be sure to tag this tty output with the compile ID.
if (xtty != NULL) {
xtty->begin_head("print_native_nmethod");
xtty->method(_method);
xtty->stamp();
xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
}
// Print the header part, then print the requested information.
// Both are handled in decode2(), called via print_code() -> decode().
if (PrintNativeNMethods) {
tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
print_code();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
#if defined(SUPPORT_DATA_STRUCTS)
if (AbstractDisassembler::show_structs()) {
if (oop_maps != NULL) {
tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
oop_maps->print_on(tty);
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
}
#endif
} else {
print(); // print the header part only.
}
#if defined(SUPPORT_DATA_STRUCTS)
if (AbstractDisassembler::show_structs()) {
if (PrintRelocations) {
print_relocations();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
}
#endif
if (xtty != NULL) {
xtty->tail("print_native_nmethod");
}
}
}
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}
void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw () {
// Try MethodNonProfiled and MethodProfiled.
void* return_value = CodeCache::allocate(nmethod_size, CodeBlobType::MethodNonProfiled);
if (return_value != nullptr || !allow_NonNMethod_space) return return_value;
// Try NonNMethod or give up.
return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
}
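// Usage sketch, mirroring new_native_nmethod above: a method handle
// intrinsic that provably carries no oops or metadata may pass
// allow_NonNMethod_space == true, so that allocation can fall back to the
// NonNMethod segment when the method code heaps are full:
//
//   nmethod* nm = new (native_nmethod_size, /* allow_NonNMethod_space */ true)
//       nmethod(...);  // may land in CodeBlobType::NonNMethod
//
// In debug builds assert_no_oops_or_metadata then verifies that nothing in
// the blob would require the GC to visit NonNMethod space.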
nmethod::nmethod(
Method* method,
CompilerType type,
int nmethod_size,
int compile_id,
int entry_bci,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder* debug_info,
Dependencies* dependencies,
CodeBuffer *code_buffer,
int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
int jvmci_data_size
#endif
)
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
_unlinked_next(NULL),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
_is_unloading_state(0)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
_deopt_handler_begin = (address) this;
_deopt_mh_handler_begin = (address) this;
init_defaults();
_entry_bci = entry_bci;
_compile_id = compile_id;
_comp_level = comp_level;
_orig_pc_offset = orig_pc_offset;
_gc_epoch = CodeCache::gc_epoch();
// Section offsets
_consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
_stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
set_ctable_begin(header_begin() + _consts_offset);
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
// JVMCI might not produce any stub sections
if (offsets->value(CodeOffsets::Exceptions) != -1) {
_exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
} else {
_exception_offset = -1;
}
if (offsets->value(CodeOffsets::Deopt) != -1) {
_deopt_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::Deopt);
} else {
_deopt_handler_begin = NULL;
}
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_begin = (address) this + code_offset() + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_begin = NULL;
}
} else
#endif
{
// Exception handler and deopt handler are in the stub section
assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deopt_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::Deopt);
if (offsets->value(CodeOffsets::DeoptMH) != -1) {
_deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_begin = NULL;
}
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
_unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
} else {
_unwind_handler_offset = -1;
}
_oops_offset = data_offset();
_metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
int scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize);
_nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
_jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize);
_nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize);
#else
_nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
#endif
_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
_osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
_exception_cache = NULL;
_scopes_data_begin = (address) this + scopes_data_offset;
_pc_desc_container.reset_to(scopes_pcs_begin());
code_buffer->copy_code_and_locs_to(this);
// Copy contents of ScopeDescRecorder to nmethod
code_buffer->copy_values_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
clear_unloading_state();
Universe::heap()->register_nmethod(this);
debug_only(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
finalize_relocations();
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
#if INCLUDE_JVMCI
// Copy speculations to nmethod
if (speculations_size() != 0) {
memcpy(speculations_begin(), speculations, speculations_len);
}
#endif
// We use the entry point information to find out whether a method is
// static or non-static.
assert(compiler->is_c2() || compiler->is_jvmci() ||
_method->is_static() == (entry_point() == _verified_entry_point),
"entry points must be the same for static methods and vice versa");
}
}
// Print a short set of xml attributes to identify this nmethod. The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
log->print(" compile_id='%d'", compile_id());
const char* nm_kind = compile_kind();
if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
log->print(" compiler='%s'", compiler_name());
if (TieredCompilation) {
log->print(" level='%d'", comp_level());
}
#if INCLUDE_JVMCI
if (jvmci_nmethod_data() != NULL) {
const char* jvmci_name = jvmci_nmethod_data()->name();
if (jvmci_name != NULL) {
log->print(" jvmci_mirror_name='");
log->text("%s", jvmci_name);
log->print("'");
}
}
#endif
}
#define LOG_OFFSET(log, name) \
if (p2i(name##_end()) - p2i(name##_begin())) \
log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'" , \
p2i(name##_begin()) - p2i(this))
void nmethod::log_new_nmethod() const {
if (LogCompilation && xtty != NULL) {
ttyLocker ttyl;
xtty->begin_elem("nmethod");
log_identity(xtty);
xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
LOG_OFFSET(xtty, relocation);
LOG_OFFSET(xtty, consts);
LOG_OFFSET(xtty, insts);
LOG_OFFSET(xtty, stub);
LOG_OFFSET(xtty, scopes_data);
LOG_OFFSET(xtty, scopes_pcs);
LOG_OFFSET(xtty, dependencies);
LOG_OFFSET(xtty, handler_table);
LOG_OFFSET(xtty, nul_chk_table);
LOG_OFFSET(xtty, oops);
LOG_OFFSET(xtty, metadata);
xtty->method(method());
xtty->stamp();
xtty->end_elem();
}
}
#undef LOG_OFFSET
// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
if (st != NULL) {
ttyLocker ttyl;
if (WizardMode) {
CompileTask::print(st, this, msg, /*short_form:*/ true);
st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
} else {
CompileTask::print(st, this, msg, /*short_form:*/ false);
}
}
}
void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
print_nmethod(printnmethods);
}
}
void nmethod::print_nmethod(bool printmethod) {
ttyLocker ttyl; // keep the following output all in one block
if (xtty != NULL) {
xtty->begin_head("print_nmethod");
log_identity(xtty);
xtty->stamp();
xtty->end_head();
}
// Print the header part, then print the requested information.
// Both are handled in decode2().
if (printmethod) {
ResourceMark m;
if (is_compiled_by_c1()) {
tty->cr();
tty->print_cr("============================= C1-compiled nmethod ==============================");
}
if (is_compiled_by_jvmci()) {
tty->cr();
tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
}
tty->print_cr("----------------------------------- Assembly -----------------------------------");
decode2(tty);
#if defined(SUPPORT_DATA_STRUCTS)
if (AbstractDisassembler::show_structs()) {
// Print the oops from the underlying CodeBlob as well.
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
print_oops(tty);
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
print_metadata(tty);
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
print_pcs();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
if (oop_maps() != NULL) {
tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
oop_maps()->print_on(tty);
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
}
#endif
} else {
print(); // print the header part only.
}
#if defined(SUPPORT_DATA_STRUCTS)
if (AbstractDisassembler::show_structs()) {
methodHandle mh(Thread::current(), _method);
if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommand::PrintDebugInfo)) {
print_scopes();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommand::PrintRelocations)) {
print_relocations();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommand::PrintDependencies)) {
print_dependencies();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod || PrintExceptionHandlers) {
print_handler_table();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
print_nul_chk_table();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod) {
print_recorded_oops();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
print_recorded_metadata();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
}
#endif
if (xtty != NULL) {
xtty->tail("print_nmethod");
}
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
// As a special case, IC oops are initialized to 1 or -1.
handle == (jobject) Universe::non_oop_word()) {
*(void**)dest = handle;
} else {
*dest = JNIHandles::resolve_non_null(handle);
}
}
// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
int length = array->length();
assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
oop* dest = oops_begin();
for (int index = 0 ; index < length; index++) {
initialize_immediate_oop(&dest[index], array->at(index));
}
// Now we can fix up all the oops in the code. We need to do this
// in the code because the assembler uses jobjects as placeholders.
// The code and relocations have already been initialized by the
// CodeBlob constructor, so it is valid even at this early point to
// iterate over relocations and patch the code.
fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}
void nmethod::copy_values(GrowableArray<Metadata*>* array) {
int length = array->length();
assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
Metadata** dest = metadata_begin();
for (int index = 0 ; index < length; index++) {
dest[index] = array->at(index);
}
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
// re-patch all oop-bearing instructions, just in case some oops moved
RelocIterator iter(this, begin, end);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (initialize_immediates && reloc->oop_is_immediate()) {
oop* dest = reloc->oop_addr();
initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
}
// Refresh the oop-related bits of this instruction.
reloc->fix_oop_relocation();
} else if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation* reloc = iter.metadata_reloc();
reloc->fix_metadata_relocation();
}
}
}
static void install_post_call_nop_displacement(nmethod* nm, address pc) {
NativePostCallNop* nop = nativePostCallNop_at((address) pc);
intptr_t cbaddr = (intptr_t) nm;
intptr_t offset = ((intptr_t) pc) - cbaddr;
int oopmap_slot = nm->oop_maps()->find_slot_for_offset((intptr_t) pc - (intptr_t) nm->code_begin());
if (oopmap_slot < 0) { // this can happen at asynchronous (non-safepoint) stackwalks
log_debug(codecache)("failed to find oopmap for cb: " INTPTR_FORMAT " offset: %d", cbaddr, (int) offset);
} else if (((oopmap_slot & 0xff) == oopmap_slot) && ((offset & 0xffffff) == offset)) {
jint value = (oopmap_slot << 24) | (jint) offset;
nop->patch(value);
} else {
log_debug(codecache)("failed to encode %d %d", oopmap_slot, (int) offset);
}
}
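// Encoding sketch: the 32-bit patch value packs the oop map slot into the
// high byte and the pc offset from the nmethod start into the low 24 bits:
//
//   jint value = (oopmap_slot << 24) | (jint) offset;  // as above
//   int  slot  = (value >> 24) & 0xff;                 // illustrative decode
//   int  off   =  value & 0xffffff;
//
// Slots >= 256 or offsets >= 2^24 cannot be encoded; those take the
// "failed to encode" path above and the stack walker must recover the
// information by other means.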
void nmethod::finalize_relocations() {
NoSafepointVerifier nsv;
// Make sure that post call nops fill in nmethod offsets eagerly so
// we don't have to race with deoptimization
RelocIterator iter(this);
while (iter.next()) {
if (iter.type() == relocInfo::post_call_nop_type) {
post_call_nop_Relocation* const reloc = iter.post_call_nop_reloc();
address pc = reloc->addr();
install_post_call_nop_displacement(this, pc);
}
}
}
void nmethod::make_deoptimized() {
if (!Continuations::enabled()) {
return;
}
assert(method() == NULL || can_be_deoptimized(), "");
CompiledICLocker ml(this);
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;
RelocIterator iter(this, oops_reloc_begin());
while (iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
address pc = ic->end_of_call();
NativePostCallNop* nop = nativePostCallNop_at(pc);
if (nop != NULL) {
nop->make_deopt();
}
assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
break;
}
case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
address pc = csc->end_of_call();
NativePostCallNop* nop = nativePostCallNop_at(pc);
//tty->print_cr(" - static pc %p", pc);
if (nop != NULL) {
nop->make_deopt();
}
// We can't assert here, there are some calls to stubs / runtime
// that have reloc data but don't have a post-call NOP.
//assert(NativeDeoptInstruction::is_deopt_at(pc), "check");
break;
}
default:
break;
}
}
// Don't deopt this again.
mark_deoptimized();
}
void nmethod::verify_clean_inline_caches() {
assert(CompiledICLocker::is_safe(this), "mt unsafe call");
ResourceMark rm;
RelocIterator iter(this, oops_reloc_begin());
while(iter.next()) {
switch(iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
CodeBlob *cb = CodeCache::find_blob(ic->ic_destination());
assert(cb != NULL, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null();
if( nm != NULL ) {
// Verify that inline caches pointing to bad nmethods are clean
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
assert(ic->is_clean(), "IC should be clean");
}
}
break;
}
case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
CodeBlob *cb = CodeCache::find_blob(csc->destination());
assert(cb != NULL, "destination not in CodeBlob?");
nmethod* nm = cb->as_nmethod_or_null();
if( nm != NULL ) {
// Verify that inline caches pointing to bad nmethods are clean
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
assert(csc->is_clean(), "IC should be clean");
}
}
break;
}
default:
break;
}
}
}
void nmethod::mark_as_maybe_on_stack() {
Atomic::store(&_gc_epoch, CodeCache::gc_epoch());
}
bool nmethod::is_maybe_on_stack() {
// If the condition below is true, it means that the nmethod was found to
// be alive during the previous completed marking cycle.
return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle();
}
void nmethod::inc_decompile_count() {
if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
// Could be gated by ProfileTraps, but do not bother...
Method* m = method();
if (m == NULL) return;
MethodData* mdo = m->method_data();
if (mdo == NULL) return;
// There is a benign race here. See comments in methodData.hpp.
mdo->inc_decompile_count();
}
bool nmethod::try_transition(int new_state_int) {
signed char new_state = new_state_int;
assert_lock_strong(CompiledMethod_lock);
signed char old_state = _state;
if (old_state >= new_state) {
// Ensure monotonicity of transitions.
return false;
}
Atomic::store(&_state, new_state);
return true;
}
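// Illustrative example: the state values declared in CompiledMethod are
// ordered (not_installed < in_use < not_entrant), so transitions only ever
// move forward:
//
//   try_transition(in_use);       // true from not_installed
//   try_transition(not_entrant);  // true, moving forward
//   try_transition(in_use);       // false, would move backward
//
// not_entrant is therefore an end state the nmethod can never back out of,
// which make_not_entrant below relies on when it checks the state before
// taking the lock.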
void nmethod::invalidate_osr_method() {
assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
// Remove from list of active nmethods
if (method() != NULL) {
method()->method_holder()->remove_osr_nmethod(this);
}
}
void nmethod::log_state_change() const {
if (LogCompilation) {
if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
os::current_thread_id());
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
}
}
CompileTask::print_ul(this, "made not entrant");
if (PrintCompilation) {
print_on(tty, "made not entrant");
}
}
void nmethod::unlink_from_method() {
if (method() != NULL) {
method()->unlink_code(this);
}
}
// Invalidate code
bool nmethod::make_not_entrant() {
// This can be called while the system is already at a safepoint which is ok
NoSafepointVerifier nsv;
if (is_unloading()) {
// If the nmethod is unloading, then it is already not entrant through
// the nmethod entry barriers. No need to do anything; GC will unload it.
return false;
}
if (Atomic::load(&_state) == not_entrant) {
// Avoid taking the lock if already in required state.
// This is safe from races because the state is an end-state,
// which the nmethod cannot back out of once entered.
// No need for fencing either.
return false;
}
{
// Enter critical section. Does not block for safepoint.
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
if (Atomic::load(&_state) == not_entrant) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
}
if (is_osr_method()) {
// This logic is equivalent to the logic below for patching the
// verified entry point of regular methods.
// this effectively makes the osr nmethod not entrant
invalidate_osr_method();
} else {
// The caller can be calling the method statically or through an inline
// cache call.
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
}
if (update_recompile_counts()) {
// Mark the method as decompiled.
inc_decompile_count();
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
// If nmethod entry barriers are not supported, we won't mark
// nmethods as on-stack when they become on-stack. So we
// degrade to a less accurate flushing strategy, for now.
mark_as_maybe_on_stack();
}
// Change state
bool success = try_transition(not_entrant);
assert(success, "Transition can't fail");
// Log the transition once
log_state_change();
// Remove nmethod from method.
unlink_from_method();
} // leave critical region under CompiledMethod_lock
#if INCLUDE_JVMCI
// Invalidate can't occur while holding the Patching lock
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
}
#endif
#ifdef ASSERT
if (is_osr_method() && method() != NULL) {
// Make sure osr nmethod is invalidated, i.e. not on the list
bool found = method()->method_holder()->remove_osr_nmethod(this);
assert(!found, "osr nmethod should have been invalidated");
}
#endif
return true;
}
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
if (_unlinked_next != NULL) {
// Already unlinked. It can be invoked twice because concurrent code cache
// unloading might need to restart when inline cache cleaning fails due to
// running out of ICStubs, which can only be refilled at safepoints
return;
}
flush_dependencies();
// unlink_from_method will take the CompiledMethod_lock.
// In this case we don't strictly need it when unlinking nmethods from
// the Method, because it is only concurrently unlinked by
// the entry barrier, which acquires the per nmethod lock.
unlink_from_method();
clear_ic_callsites();
if (is_osr_method()) {
invalidate_osr_method();
}
#if INCLUDE_JVMCI
// Clear the link between this nmethod and a HotSpotNmethod mirror
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
}
#endif
// Post before flushing as jmethodID is being used
post_compiled_method_unload();
// Register for flushing when it is safe. For concurrent class unloading,
// that would be after the unloading handshake, and for STW class unloading
// that would be when getting back to the VM thread.
CodeCache::register_unlinked(this);
}
void nmethod::flush() {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// completely deallocate this method
Events::log(Thread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:" SIZE_FORMAT "Kb",
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
// We need to deallocate any ExceptionCache data.
// Note that we do not need to grab the nmethod lock for this, it
// better be thread safe if we're disposing of it!
ExceptionCache* ec = exception_cache();
while(ec != NULL) {
ExceptionCache* next = ec->next();
delete ec;
ec = next;
}
Universe::heap()->unregister_nmethod(this);
CodeCache::unregister_old_nmethod(this);
CodeBlob::flush();
CodeCache::free(this);
}
oop nmethod::oop_at(int index) const {
if (index == 0) {
return NULL;
}
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index));
}
oop nmethod::oop_at_phantom(int index) const {
if (index == 0) {
return NULL;
}
return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index));
}
//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent.
void nmethod::flush_dependencies() {
if (!has_flushed_dependencies()) {
set_has_flushed_dependencies();
for (Dependencies::DepStream deps(this); deps.next(); ) {
if (deps.type() == Dependencies::call_site_target_value) {
// CallSite dependencies are managed on per-CallSite instance basis.
oop call_site = deps.argument_oop(0);
MethodHandles::clean_dependency_context(call_site);
} else {
Klass* klass = deps.context_type();
if (klass == NULL) {
continue; // ignore things like evol_method
}
// During GC liveness of dependee determines class that needs to be updated.
// The GC may clean dependency contexts concurrently and in parallel.
InstanceKlass::cast(klass)->clean_dependency_context();
}
}
}
}
void nmethod::post_compiled_method(CompileTask* task) {
task->mark_success();
task->set_nm_content_size(content_size());
task->set_nm_insts_size(insts_size());
task->set_nm_total_size(total_size());
// JVMTI -- compiled method notification (must be done outside lock)
post_compiled_method_load_event();
if (CompilationLog::log() != NULL) {
CompilationLog::log()->log_nmethod(JavaThread::current(), this);
}
const DirectiveSet* directive = task->directive();
maybe_print_nmethod(directive);
}
// ------------------------------------------------------------------
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
// This is a bad time for a safepoint. We don't want
// this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
Method* m = method();
HOTSPOT_COMPILED_METHOD_LOAD(
(char *) m->klass_name()->bytes(),
m->klass_name()->utf8_length(),
(char *) m->name()->bytes(),
m->name()->utf8_length(),
(char *) m->signature()->bytes(),
m->signature()->utf8_length(),
insts_begin(), insts_size());
if (JvmtiExport::should_post_compiled_method_load()) {
// Only post unload events if load events are found.
set_load_reported();
// If a JavaThread hasn't been passed in, let the Service thread
// (which is a real Java thread) post the event
JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
if (state == NULL) {
// Execute any barrier code for this nmethod as if it's called, since
// keeping it alive looks like stack walking.
run_nmethod_entry_barrier();
ServiceThread::enqueue_deferred_event(&event);
} else {
// This enters the nmethod barrier outside in the caller.
state->enqueue_event(&event);
}
}
}
void nmethod::post_compiled_method_unload() {
assert(_method != NULL, "just checking");
DTRACE_METHOD_UNLOAD_PROBE(method());
// If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. The Method* will not be valid when this is freed.
// Don't bother posting the unload if the load event wasn't posted.
if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(
method()->jmethod_id(), insts_begin());
ServiceThread::enqueue_deferred_event(&event);
}
}
// Iterate over metadata calling this function. Used by RedefineClasses
void nmethod::metadata_do(MetadataClosure* f) {
{
// Visit all immediate references that are embedded in the instruction stream.
RelocIterator iter(this, oops_reloc_begin());
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type) {
metadata_Relocation* r = iter.metadata_reloc();
// In this loop, we must only follow metadata directly embedded in
// the code. Other metadata (oop_index>0) is seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
"metadata must be found in exactly one place");
if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
Metadata* md = r->metadata_value();
if (md != _method) f->do_metadata(md);
}
} else if (iter.type() == relocInfo::virtual_call_type) {
// Check compiledIC holders associated with this nmethod
ResourceMark rm;
CompiledIC *ic = CompiledIC_at(&iter);
if (ic->is_icholder_call()) {
CompiledICHolder* cichk = ic->cached_icholder();
f->do_metadata(cichk->holder_metadata());
f->do_metadata(cichk->holder_klass());
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
f->do_metadata(ic_oop);
}
}
}
}
}
// Visit the metadata section
for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
if (*p == Universe::non_oop_word() || *p == NULL) continue; // skip non-oops
Metadata* md = *p;
f->do_metadata(md);
}
// Visit metadata not embedded in the other places.
if (_method != NULL) f->do_metadata(_method);
}
// Heuristic for nuking nmethods even though their oops are live.
// Main purpose is to reduce code cache pressure and get rid of
// nmethods that don't seem to be all that relevant any longer.
bool nmethod::is_cold() {
if (!MethodFlushing || is_native_method() || is_not_installed()) {
// No heuristic unloading at all
return false;
}
if (!is_maybe_on_stack() && is_not_entrant()) {
// Not entrant nmethods that are not on any stack can just
// be removed
return true;
}
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == nullptr || !bs_nm->supports_entry_barrier(this)) {
// On platforms that don't support nmethod entry barriers, we can't
// trust the temporal aspect of the gc epochs. So we can't detect
// cold nmethods on such platforms.
return false;
}
if (!UseCodeCacheFlushing) {
// Bail out if we don't heuristically remove nmethods
return false;
}
// Other code can be phased out more gradually after N GCs
return CodeCache::previous_completed_gc_marking_cycle() > _gc_epoch + 2 * CodeCache::cold_gc_count();
}
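// Worked example with illustrative numbers: if CodeCache::cold_gc_count()
// is 10 and this nmethod's _gc_epoch was last refreshed (via
// mark_as_maybe_on_stack) at epoch 100, it only becomes cold once
//
//   previous_completed_gc_marking_cycle() > 100 + 2 * 10 == 120
//
// i.e. after it has sat unobserved on any stack for more than
// 2 * cold_gc_count completed marking cycles.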
// The _is_unloading_state encodes a tuple comprising the unloading cycle
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
// This is the bit layout of the _is_unloading_state byte: 00000CCU
// CC refers to the cycle, which has 2 bits, and U refers to the result of
// IsUnloadingBehaviour::is_unloading() for that unloading cycle.
class IsUnloadingState: public AllStatic {
static const uint8_t _is_unloading_mask = 1;
static const uint8_t _is_unloading_shift = 0;
static const uint8_t _unloading_cycle_mask = 6;
static const uint8_t _unloading_cycle_shift = 1;
static uint8_t set_is_unloading(uint8_t state, bool value) {
state &= ~_is_unloading_mask;
if (value) {
state |= 1 << _is_unloading_shift;
}
assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
return state;
}
static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
state &= ~_unloading_cycle_mask;
state |= value << _unloading_cycle_shift;
assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
return state;
}
public:
static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
uint8_t state = 0;
state = set_is_unloading(state, is_unloading);
state = set_unloading_cycle(state, unloading_cycle);
return state;
}
};
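// Worked example of the 00000CCU layout: create(true, 2) yields
//
//   U  = 1              // bit 0, is_unloading
//   CC = 2 (binary 10)  // bits 1..2, unloading cycle
//   state = 0b101 = 5
//
//   is_unloading(5)    == true  // (5 & 1) == 1
//   unloading_cycle(5) == 2     // (5 & 6) >> 1
//
// With only two cycle bits the counter wraps; is_unloading() below
// re-evaluates whenever the stored cycle differs from the current
// CodeCache::unloading_cycle().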
bool nmethod::is_unloading() {
uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state);
bool state_is_unloading = IsUnloadingState::is_unloading(state);
if (state_is_unloading) {
return true;
}
uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
uint8_t current_cycle = CodeCache::unloading_cycle();
if (state_unloading_cycle == current_cycle) {
return false;
}
// The IsUnloadingBehaviour is responsible for calculating if the nmethod
// should be unloaded. This can be either because there is a dead oop,
// or because is_cold() heuristically determines it is time to unload.
state_unloading_cycle = current_cycle;
state_is_unloading = IsUnloadingBehaviour::is_unloading(this);
uint8_t new_state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
// Note that if an nmethod has dead oops, everyone will agree that the
// nmethod is_unloading. However, the is_cold heuristics can yield
// different outcomes, so we guard the computed result with a CAS
// to ensure all threads have a shared view of whether an nmethod
// is_unloading or not.
uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed);
if (found_state == state) {
// First to change state, we win
return state_is_unloading;
} else {
// State already set, so use it
return IsUnloadingState::is_unloading(found_state);
}
}
void nmethod::clear_unloading_state() {
uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops or is heuristically found to be not important.
void nmethod::do_unloading(bool unloading_occurred) {
// Make sure the oops are ready to receive visitors
if (is_unloading()) {
unlink();
} else {
guarantee(unload_nmethod_caches(unloading_occurred),
"Should not need transition stubs");
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
bs_nm->disarm(this);
}
}
}
void nmethod::oops_do(OopClosure* f, bool allow_dead) {
// Prevent extra code cache walk for platforms that don't have immediate oops.
if (relocInfo::mustIterateImmediateOopsInCode()) {
RelocIterator iter(this, oops_reloc_begin());
while (iter.next()) {
if (iter.type() == relocInfo::oop_type ) {
oop_Relocation* r = iter.oop_reloc();
// In this loop, we must only follow those oops directly embedded in
// the code. Other oops (oop_index>0) are seen as part of scopes_oops.
assert(1 == (r->oop_is_immediate()) +
(r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
"oop must be found in exactly one place");
if (r->oop_is_immediate() && r->oop_value() != NULL) {
f->do_oop(r->oop_addr());
}
}
}
}
// Scopes
// This includes oop constants not inlined in the code stream.
for (oop* p = oops_begin(); p < oops_end(); p++) {
if (*p == Universe::non_oop_word()) continue; // skip non-oops
f->do_oop(p);
}
}
void nmethod::follow_nmethod(OopIterateClosure* cl) {
// Process oops in the nmethod
oops_do(cl);
// CodeCache unloading support
mark_as_maybe_on_stack();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
bs_nm->disarm(this);
// There's an assumption made that this function is not used by GCs that
// relocate objects, and therefore we don't call fix_oop_relocations.
}
nmethod* volatile nmethod::_oops_do_mark_nmethods;
void nmethod::oops_do_log_change(const char* state) {
LogTarget(Trace, gc, nmethod) lt;
if (lt.is_enabled()) {
LogStream ls(lt);
CompileTask::print(&ls, this, state, true /* short_form */);
}
}
bool nmethod::oops_do_try_claim() {
if (oops_do_try_claim_weak_request()) {
nmethod* result = oops_do_try_add_to_list_as_weak_done();
assert(result == NULL, "adding to global list as weak done must always succeed.");
return true;
}
return false;
}
bool nmethod::oops_do_try_claim_weak_request() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");