/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Forward declarations of types referenced only by pointer/reference in this header.
class CompileTask;
class DepChange;
class DirectiveSet;
class DebugInformationRecorder;
class JvmtiThreadState;
class OopIterateClosure;
// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod
// Preprocessor directives must each occupy their own line; the fused
// single-line form was ill-formed and hid both declarations.
#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif
class nmethod : public CompiledMethod {
  // "friendclass" is not a token pair C++ accepts; restored to "friend class".
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;

 private:

  // GC epoch bookkeeping for this nmethod.
  // NOTE(review): exact semantics (e.g. "last observed on stack") are not
  // visible in this chunk -- confirm against the code updating _gc_epoch.
  uint64_t _gc_epoch;

  // To support simple linked-list chaining of nmethods:
  nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
// STW two-phase nmethod root processing helpers. // // When determining liveness of a given nmethod to do code cache unloading, // some collectors need to do different things depending on whether the nmethods // need to absolutely be kept alive during root processing; "strong"ly reachable // nmethods are known to be kept alive at root processing, but the liveness of // "weak"ly reachable ones is to be determined later. // // We want to allow strong and weak processing of nmethods by different threads // at the same time without heavy synchronization. Additional constraints are // to make sure that every nmethod is processed a minimal amount of time, and // nmethods themselves are always iterated at most once at a particular time. // // Note that strong processing work must be a superset of weak processing work // for this code to work. // // We store state and claim information in the _oops_do_mark_link member, using // the two LSBs for the state and the remaining upper bits for linking together // nmethods that were already visited. // The last element is self-looped, i.e. points to itself to avoid some special // "end-of-list" sentinel value. // // _oops_do_mark_link special values: // // _oops_do_mark_link == NULL: the nmethod has not been visited at all yet, i.e. // is Unclaimed. // // For other values, its lowest two bits indicate the following states of the nmethod: // // weak_request (WR): the nmethod has been claimed by a thread for weak processing // weak_done (WD): weak processing has been completed for this nmethod. // strong_request (SR): the nmethod has been found to need strong processing while // being weak processed. // strong_done (SD): strong processing has been completed for this nmethod . // // The following shows the _only_ possible progressions of the _oops_do_mark_link // pointer. 
// // Given // N as the nmethod // X the current next value of _oops_do_mark_link // // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by // a single thread. // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been // completed (as above) another thread found that the nmethod needs strong // processing after all. // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another // thread finds that the nmethod needs strong processing, marks it as such and // terminates. The original thread completes strong processing. // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from // the beginning by a single thread. // // "|" describes the concatenation of bits in _oops_do_mark_link. // // The diagram also describes the threads responsible for changing the nmethod to // the next state by marking the _transition_ with (C) and (O), which mean "current" // and "other" thread respectively. // struct oops_do_mark_link; // Opaque data type.
// States used for claiming nmethods during root processing. staticconst uint claim_weak_request_tag = 0; staticconst uint claim_weak_done_tag = 1; staticconst uint claim_strong_request_tag = 2; staticconst uint claim_strong_done_tag = 3;
static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
assert(is_aligned(nm, 4), "nmethod pointer must have zero lower two LSB"); return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
}
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
// NOTE(review): presumably chains nmethods that have been unlinked and await
// deletion (see unlink()/flush() declared below) -- confirm against their
// implementations, which are not visible in this chunk.
nmethod* _unlinked_next;
// Shared fields for all nmethod's int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
// Offsets for different nmethod parts int _exception_offset; // Offset of the unwind handler if it exists int _unwind_handler_offset;
int _consts_offset; int _stub_offset; int _oops_offset; // offset to where embedded oop table begins (inside data) int _metadata_offset; // embedded meta data table int _scopes_data_offset; int _scopes_pcs_offset; int _dependencies_offset; int _handler_table_offset; int _nul_chk_table_offset; #if INCLUDE_JVMCI int _speculations_offset; int _jvmci_data_offset; #endif int _nmethod_end_offset;
int code_offset() const { return (address) code_begin() - header_begin(); }
// location in frame (offset for sp) that deopt can store the original // pc during a deopt. int _orig_pc_offset;
int _compile_id; // which compilation made this nmethod
#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistic again.
  RTMState _rtm_state;
  // #endif must start its own line; it had been fused after the declaration.
#endif
// These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock. They are
// needed because there is no debug information for compiled native
// wrappers and the oop maps are insufficient to allow
// frame::retrieve_receiver() to work. Currently they are expected
// to be byte offsets from the Java stack pointer for maximum code
// sharing between platforms. JVMTI's GetLocalInstance() uses these
// offsets to find the receiver for non-static native wrapper frames.
ByteSize _native_receiver_sp_offset;
ByteSize _native_basic_lock_sp_offset;
CompLevel _comp_level; // compilation level
// Local state used to keep track of whether unloading is happening or not volatile uint8_t _is_unloading_state;
// protected by CodeCache_lock bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
// used by jvmti to track if an event has been posted for this nmethod. bool _load_reported;
// For native wrappers
// Constructor for native-method wrapper nmethods: note the absence of
// DebugInformationRecorder/Dependencies parameters (wrappers carry no
// scopes/debug data, per the comment block on the lock-offset fields above).
nmethod(Method* method,
CompilerType type, int nmethod_size, int compile_id,
CodeOffsets* offsets,
CodeBuffer *code_buffer, int frame_size,
ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
ByteSize basic_lock_sp_offset, /* synchronized natives only */
OopMapSet* oop_maps);
// Creation support
nmethod(Method* method,
CompilerType type, int nmethod_size, int compile_id, int entry_bci,
CodeOffsets* offsets, int orig_pc_offset,
DebugInformationRecorder *recorder,
Dependencies* dependencies,
CodeBuffer *code_buffer, int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level #if INCLUDE_JVMCI
, char* speculations, int speculations_len, int jvmci_data_size #endif
);
// helper methods void* operatornew(size_t size, int nmethod_size, int comp_level) throw(); // For method handle intrinsics: Try MethodNonProfiled, MethodProfiled and NonNMethod. // Attention: Only allow NonNMethod space for special nmethods which don't need to be // findable by nmethod iterators! In particular, they must not contain oops! void* operatornew(size_t size, int nmethod_size, bool allow_NonNMethod_space) throw();
// Returns true if this thread changed the state of the nmethod or // false if another thread performed the transition. bool make_entrant() { Unimplemented(); returnfalse; } void inc_decompile_count();
// Inform external interfaces that a compiled method has been unloaded void post_compiled_method_unload();
// Initialize fields to their default values void init_defaults();
// Offsets int content_offset() const { return content_begin() - header_begin(); } int data_offset() const { return _data_offset; }
public: // create nmethod with entry_bci static nmethod* new_nmethod(const methodHandle& method, int compile_id, int entry_bci,
CodeOffsets* offsets, int orig_pc_offset,
DebugInformationRecorder* recorder,
Dependencies* dependencies,
CodeBuffer *code_buffer, int frame_size,
OopMapSet* oop_maps,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
CompLevel comp_level #if INCLUDE_JVMCI
, char* speculations = NULL, int speculations_len = 0, int nmethod_mirror_index = -1, constchar* nmethod_mirror_name = NULL,
FailedSpeculation** failed_speculations = NULL #endif
);
// Only used for unit tests.
// Default-constructs a mostly-uninitialized nmethod: the two native sp
// offsets are set to -1 (invalid) and the unloading state to 0.
nmethod()
: CompiledMethod(),
_native_receiver_sp_offset(in_ByteSize(-1)),
_native_basic_lock_sp_offset(in_ByteSize(-1)),
_is_unloading_state(0) {}
// Creates an nmethod wrapping a native method (a "native wrapper").
// NOTE(review): exception_handler == -1 presumably means "no explicit
// exception handler offset" -- confirm against the implementation.
static nmethod* new_native_nmethod(const methodHandle& method, int compile_id,
CodeBuffer *code_buffer, int vep_offset, int frame_complete, int frame_size,
ByteSize receiver_sp_offset,
ByteSize basic_lock_sp_offset,
OopMapSet* oop_maps, int exception_handler = -1);
// type info bool is_nmethod() const { returntrue; } bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
bool make_in_use() { return try_transition(in_use);
} // Make the nmethod non entrant. The nmethod will continue to be // alive. It is used when an uncommon trap happens. Returns true // if this thread changed the state of the nmethod or false if // another thread performed the transition. bool make_not_entrant(); bool make_not_used() { return make_not_entrant(); }
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const;
oop oop_at_phantom(int index) const; // phantom reference
oop* oop_addr_at(int index) const { // for GC; relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= oops_count(), "must be a valid non-zero index"); return &oops_begin()[index - 1];
}
// Support for meta data in scopes and relocs:
// Note: index 0 is reserved for null (hence the index == 0 check below).
Metadata* metadata_at(int index) const { return index == 0 ? NULL: *metadata_addr_at(index); }
Metadata** metadata_addr_at(int index) const { // for GC; relocation indexes are biased by 1 (because 0 is reserved)
assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index"); return &metadata_begin()[index - 1];
}
// Verify calls to dead methods have been cleaned. void verify_clean_inline_caches();
// Unlink this nmethod from the system void unlink();
// Deallocate this nmethod - called by the GC void flush();
// See comment at definition of _last_seen_on_stack void mark_as_maybe_on_stack(); bool is_maybe_on_stack();
// Evolution support. We make old (discarded) compiled methods point to new Method*s. void set_method(Method* method) { _method = method; }
#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  // ("constchar*" fused token fixed; the declarations below had been swallowed
  // into the trailing comment of the #if line.)
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-NULL value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif
// All-in-one claiming of nmethods: returns true if the caller successfully claimed that // nmethod. bool oops_do_try_claim();
// Loom support for following nmethods on the stack void follow_nmethod(OopIterateClosure* cl);
// Class containing callbacks for the oops_do_process_weak/strong() methods // below. class OopsDoProcessor { public: // Process the oops of the given nmethod based on whether it has been called // in a weak or strong processing context, i.e. apply either weak or strong // work on it. virtualvoid do_regular_processing(nmethod* nm) = 0; // Assuming that the oops of the given nmethod has already been its weak // processing applied, apply the remaining strong processing part. virtualvoid do_remaining_strong_processing(nmethod* nm) = 0;
};
// The following two methods do the work corresponding to weak/strong nmethod // processing. void oops_do_process_weak(OopsDoProcessor* p); void oops_do_process_strong(OopsDoProcessor* p);
// used by jvmti to track if the load events has been reported bool load_reported() const { return _load_reported; } void set_load_reported() { _load_reported = true; }
public: // copying of debugging information void copy_scopes_pcs(PcDesc* pcs, int count); void copy_scopes_data(address buffer, int size);
int orig_pc_offset() { return _orig_pc_offset; }
// Post successful compilation void post_compiled_method(CompileTask* task);
// jvmti support: void post_compiled_method_load_event(JvmtiThreadState* state = NULL);
// Disassemble this nmethod with additional debug information, e.g. information about blocks. void decode2(outputStream* st) const; void print_constant_pool(outputStream* st);
// Avoid hiding of parent's 'decode(outputStream*)' method. void decode(outputStream* st) const { decode2(st); } // just delegate here.
// need to re-define this from CodeBlob else the overload hides it virtualvoid print_on(outputStream* st) const { CodeBlob::print_on(st); } void print_on(outputStream* st, constchar* msg) const;
// returns whether this nmethod has code comments. bool has_code_comment(address begin, address end); // Prints a comment for one native instruction (reloc info, pc desc) void print_code_comment_on(outputStream* st, int column, address begin, address end);
// Compiler task identification. Note that all OSR methods // are numbered in an independent sequence if CICountOSR is true, // and native method wrappers are also numbered independently if // CICountNative is true. virtualint compile_id() const { return _compile_id; } constchar* compile_kind() const;
// tells if any of this method's dependencies have been invalidated // (this is expensive!) staticvoid check_all_dependencies(DepChange& changes);
// tells if this compiled method is dependent on the given changes, // and the changes have invalidated it bool check_dependency_on(DepChange& changes);
// Fast breakpoint support. Tells if this compiled method is // dependent on the given method. Returns true if this nmethod // corresponds to the given method as well. virtualbool is_dependent_on_method(Method* dependee);
// NOTE(review): the text below is not part of the original source -- it is a
// German disclaimer from the web page this file was copied from. Kept here
// (translated, as a comment so the file remains compilable) pending removal:
// "The information on this web page was compiled carefully to the best of our
// knowledge. However, neither completeness, nor correctness, nor quality of
// the provided information is guaranteed.
// Note: the colored syntax highlighting is still experimental."