// sharedRuntime.cpp - shared runtime support (interaction and portability).
/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
#ifndef PRODUCT // For statistics int SharedRuntime::_ic_miss_ctr = 0; int SharedRuntime::_wrong_method_ctr = 0; int SharedRuntime::_resolve_static_ctr = 0; int SharedRuntime::_resolve_virtual_ctr = 0; int SharedRuntime::_resolve_opt_virtual_ctr = 0; int SharedRuntime::_implicit_null_throws = 0; int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_new_instance_ctr=0; int SharedRuntime::_new_array_ctr=0; int SharedRuntime::_multi2_ctr=0; int SharedRuntime::_multi3_ctr=0; int SharedRuntime::_multi4_ctr=0; int SharedRuntime::_multi5_ctr=0; int SharedRuntime::_mon_enter_stub_ctr=0; int SharedRuntime::_mon_exit_stub_ctr=0; int SharedRuntime::_mon_enter_ctr=0; int SharedRuntime::_mon_exit_ctr=0; int SharedRuntime::_partial_subtype_ctr=0; int SharedRuntime::_jbyte_array_copy_ctr=0; int SharedRuntime::_jshort_array_copy_ctr=0; int SharedRuntime::_jint_array_copy_ctr=0; int SharedRuntime::_jlong_array_copy_ctr=0; int SharedRuntime::_oop_array_copy_ctr=0; int SharedRuntime::_checkcast_array_copy_ctr=0; int SharedRuntime::_unsafe_array_copy_ctr=0; int SharedRuntime::_generic_array_copy_ctr=0; int SharedRuntime::_slow_array_copy_ctr=0; int SharedRuntime::_find_handler_ctr=0; int SharedRuntime::_rethrow_ctr=0;
int SharedRuntime::_ICmiss_index = 0; int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
// Record an inline-cache miss at the given address.  Bumps the count for a
// known site, or appends a new histogram entry (the last slot is reused once
// the table is full, so the arrays are never overrun).
void SharedRuntime::trace_ic_miss(address at) {
  // Known miss site?  Just bump its counter.
  for (int slot = 0; slot < _ICmiss_index; slot++) {
    if (_ICmiss_at[slot] == at) {
      _ICmiss_count[slot]++;
      return;
    }
  }
  // New miss site: claim the next slot, clamping the index so that once the
  // table is full the final slot absorbs all further new sites.
  int new_slot = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) {
    _ICmiss_index = maxICmiss_count - 1;
  }
  _ICmiss_at[new_slot] = at;
  _ICmiss_count[new_slot] = 1;
}
void SharedRuntime::print_ic_miss_histogram() { if (ICMissHistogram) {
tty->print_cr("IC Miss Histogram:"); int tot_misses = 0; for (int i = 0; i < _ICmiss_index; i++) {
tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
tot_misses += _ICmiss_count[i];
}
tty->print_cr("Total IC misses: %7d", tot_misses);
}
} #endif// PRODUCT
// Java long multiply.  Note the stub passes the operands in reversed order
// (y first, then x); multiplication is commutative so the order is harmless.
JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END
JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x)) if (x == min_jlong && y == CONST64(-1)) { return x;
} else { return x / y;
}
JRT_END
JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x)) if (x == min_jlong && y == CONST64(-1)) { return 0;
} else { return x % y;
}
JRT_END
// NOTE(review): this is a fragment of the float -> binary16 (half-float)
// conversion helper; the enclosing function header and the computation of
// 'abs_f', 'sign_bit', 'exp' and 'doppel' were lost in this chunk — verify
// against the upstream file.  Fix applied here: in the mangled original the
// two threshold comments shared a physical line with their 'if' statements,
// commenting the checks out entirely.

  // Overflow threshold is halffloat max value + 1/2 ulp
  if (abs_f >= (65504.0f + 16.0f)) {
    return (jshort)(sign_bit | 0x7c00); // Positive or negative infinity
  }

  // Smallest magnitude of Halffloat is 0x1.0p-24, half-way or smaller rounds to zero
  if (abs_f <= (pow(2, -24) * 0.5f)) { // Covers float zeros and subnormals.
    return sign_bit; // Positive or negative zero
  }

  // For binary16 subnormals, beside forcing exp to -15, retain
  // the difference exp_delta = E_min - exp.  This is the excess
  // shift value, in addition to 13, to be used in the
  // computations below.  Further the (hidden) msb with value 1
  // in f must be involved as well
  jint exp_delta = 0;
  jint msb = 0x00000000;
  if (exp < -14) {
    exp_delta = -14 - exp;
    exp = -15;
    msb = 0x00800000;
  }
  jint f_signif_bits = ((doppel & 0x007fffff) | msb);

  // Significand bits as if using rounding to zero
  jshort signif_bits = (jshort)(f_signif_bits >> (13 + exp_delta));
// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
  // Note: This is called when we have unwound the frame of the callee that did
  // throw an exception. So far, no check has been performed by the StackWatermarkSet.
  // Notably, the stack is not walkable at this point, and hence the check must
  // be deferred until later. Specifically, any of the handlers returned here in
  // this function, will get dispatched to, and call deferred checks to
  // StackWatermarkSet::after_unwind at a point where the stack is walkable.
  assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
  assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");

#if INCLUDE_JVMCI
  // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
  // and other exception handler continuations do not read it
  current->set_exception_pc(NULL);
#endif // INCLUDE_JVMCI

  if (Continuation::is_return_barrier_entry(return_address)) {
    return StubRoutines::cont_returnBarrierExc();
  }

  // The fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Set flag if return address is a method handle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      // If we come here because of a stack overflow, the stack may be
      // unguarded. Reguard the stack otherwise if we return to the
      // deopt blob and the stack bang causes a stack overflow we
      // crash.
      StackOverflow* overflow_state = current->stack_overflow_state();
      bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
      if (overflow_state->reserved_stack_activation() != current->stack_base()) {
        overflow_state->set_reserved_stack_activation(current->stack_base());
      }
      assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // * OptoRuntime::handle_exception_C_helper for C2 code
      // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // JavaCallWrapper::~JavaCallWrapper
    return StubRoutines::catch_exception_entry();
  }
  if (blob != NULL && blob->is_upcall_stub()) {
    return ((UpcallStub*)blob)->exception_handler();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // InterpreterRuntime::exception_handler_for_exception
    return Interpreter::rethrow_exception_entry();
  }

  guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
    os::print_location(tty, (intptr_t)return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT

  // NOTE(review): the function tail was lost in this chunk; restored to the
  // conventional "should not get here" termination — verify against upstream.
  ShouldNotReachHere();
  return NULL;
}
// Return the safepoint-handling stub appropriate for the polling instruction
// at 'pc' (poll-return vs. loop poll, wide vectors vs. not).
// Fixes: "elseif" is not a C++ keyword (must be "else if"); the "#ifdef ASSERT"
// and "#endif" directives shared physical lines with code in the mangled
// original, which is invalid.
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
         "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));

#ifdef ASSERT
  if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
    tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
    Disassembler::decode(cb);
    fatal("Only polling locations are used for safepoint");
  }
#endif

  bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
  bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else if (has_wide_vectors) {
    assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
           "polling page vectors safepoint stub not created yet");
    stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
  log_debug(safepoint)("... found polling page %s exception at pc = "
                       INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                       at_poll_return ? "return" : "loop",
                       (intptr_t)pc, (intptr_t)stub);
  return stub;
}
// The interpreter code to call this tracing function is only
// called/generated when UL is on for redefine, class and has the right level
// and tags. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, Method* method))
  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the Method* from the constant pool.
    ResourceMark rm;
    log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
  }
  return 0;
JRT_END
// ret_pc points into caller; we are returning caller's exception handler
// for given exception
// Fixes: "#if INCLUDE_JVMCI" and "#endif" shared physical lines with code in
// the mangled original (invalid), and the comment on the JVMCI branch
// commented out the 'int catch_pco' computation.
address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
  assert(cm != NULL, "must exist");
  ResourceMark rm;

#if INCLUDE_JVMCI
  if (cm->is_compiled_by_jvmci()) {
    // lookup exception handler for this pc
    int catch_pco = ret_pc - cm->code_begin();
    ExceptionHandlerTable table(cm);
    HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
    if (t != NULL) {
      return cm->code_begin() + t->pco();
    } else {
      return Deoptimization::deoptimize_for_missing_exception_handler(cm);
    }
  }
#endif // INCLUDE_JVMCI

  nmethod* nm = cm->as_nmethod();
  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    bool recursive_exception = false;
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      Klass* ek = exception->klass();
      methodHandle mh(THREAD, sd->method());
      handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        recursive_exception = true;
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        recursive_exception_occurred = true;
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      } else {
        recursive_exception = false;
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != NULL));
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->code_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables.  The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions.  In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

  // NOTE(review): the tail of this function was lost in this chunk; the
  // missing-handler diagnostic and final return below follow the upstream
  // implementation — verify before relying on the exact dump output.
  if (t == NULL) {
    ttyLocker ttyl;
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->code_begin() + t->pco();
}
// Runtime entry to raise AbstractMethodError.
JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
JRT_END
// Runtime entry to raise IncompatibleClassChangeError from a vtable stub.
JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END
// Runtime entry to raise ArithmeticException for an integer division by zero.
JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END
// Runtime entry to raise NullPointerException at a call site.
JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), NULL);
JRT_END
// Fabricate and throw a StackOverflowError without performing a Java upcall.
void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  JavaThread* THREAD = current; // For exception macros.
  Klass* soe_klass = vmClasses::StackOverflowError_klass();
  oop exception_oop = InstanceKlass::cast(soe_klass)->allocate_instance(CHECK);
  if (delayed) {
    // Delayed overflows carry an explanatory message.
    java_lang_Throwable::set_message(exception_oop,
                                     Universe::delayed_stack_overflow_error_message());
  }
  Handle exception (current, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  // Remove the ScopedValue bindings in case we got a
  // StackOverflowError while we were trying to remove ScopedValue
  // bindings.
  current->clear_scopedValueBindings();
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  throw_and_post_jvmti_exception(current, exception);
}
if (Interpreter::contains(pc)) { switch (exception_kind) { case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry(); case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry(); case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry(); default: ShouldNotReachHere();
}
} else { switch (exception_kind) { case STACK_OVERFLOW: { // Stack overflow only occurs upon frame setup; the callee is // going to be unwound. Dispatch to a shared runtime stub // which will cause the StackOverflowError to be fabricated // and processed. // Stack overflow should never occur during deoptimization: // the compiled method bangs the stack by as much as the // interpreter would need in case of a deoptimization. The // deoptimization blob and uncommon trap blob bang the stack // in a debug VM to verify the correctness of the compiled // method stack banging.
assert(current->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc)); return StubRoutines::throw_StackOverflowError_entry();
}
case IMPLICIT_NULL: { if (VtableStubs::contains(pc)) { // We haven't yet entered the callee frame. Fabricate an // exception and begin dispatching it in the caller. Since // the caller was at a call site, it's safe to destroy all // caller-saved registers, as these entry points do.
VtableStub* vt_stub = VtableStubs::stub_containing(pc);
// If vt_stub is NULL, then return NULL to signal handler to report the SEGV error. if (vt_stub == NULL) return NULL;
if (vt_stub->is_abstract_method_error(pc)) {
assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc)); // Instead of throwing the abstract method error here directly, we re-resolve // and will throw the AbstractMethodError during resolve. As a result, we'll // get a more detailed error message. return SharedRuntime::get_handle_wrong_method_stub();
} else {
Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc)); // Assert that the signal comes from the expected location in stub code.
assert(vt_stub->is_null_pointer_exception(pc), "obtained signal from unexpected location in stub code"); return StubRoutines::throw_NullPointerException_at_call_entry();
}
} else {
CodeBlob* cb = CodeCache::find_blob(pc);
// If code blob is NULL, then return NULL to signal handler to report the SEGV error. if (cb == NULL) return NULL;
// Exception happened in CodeCache. Must be either: // 1. Inline-cache check in C2I handler blob, // 2. Inline-cache check in nmethod, or // 3. Implicit null exception in nmethod
if (!cb->is_compiled()) { bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); if (!is_in_blob) { // Allow normal crash reporting to handle this return NULL;
}
Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc)); // There is no handler here, so we will simply unwind. return StubRoutines::throw_NullPointerException_at_call_entry();
}
// Otherwise, it's a compiled method. Consult its exception handlers.
CompiledMethod* cm = (CompiledMethod*)cb; if (cm->inlinecache_check_contains(pc)) { // exception happened inside inline-cache check code // => the nmethod is not yet active (i.e., the frame // is not set up yet) => use return address pushed by // caller => don't push another return address
Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc)); return StubRoutines::throw_NullPointerException_at_call_entry();
}
if (cm->method()->is_method_handle_intrinsic()) { // exception happened inside MH dispatch code, similar to a vtable stub
Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc)); return StubRoutines::throw_NullPointerException_at_call_entry();
}
#ifndef PRODUCT
_implicit_null_throws++; #endif
target_pc = cm->continuation_for_implicit_null_exception(pc); // If there's an unexpected fault, target_pc might be NULL, // in which case we want to fall through into the normal // error handling code.
}
break; // fall through
}
case IMPLICIT_DIVIDE_BY_ZERO: {
CompiledMethod* cm = CodeCache::find_compiled(pc);
guarantee(cm != NULL, "must have containing compiled method for implicit division-by-zero exceptions"); #ifndef PRODUCT
_implicit_div0_throws++; #endif
target_pc = cm->continuation_for_implicit_div0_exception(pc); // If there's an unexpected fault, target_pc might be NULL, // in which case we want to fall through into the normal // error handling code. break; // fall through
}
if (exception_kind == IMPLICIT_NULL) { #ifndef PRODUCT // for AbortVMOnException flag
Exceptions::debug_check_abort("java.lang.NullPointerException"); #endif//PRODUCT
Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
} else { #ifndef PRODUCT // for AbortVMOnException flag
Exceptions::debug_check_abort("java.lang.ArithmeticException"); #endif//PRODUCT
Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
} return target_pc;
}
ShouldNotReachHere(); return NULL;
}
/**
 * Throws an java/lang/UnsatisfiedLinkError.  The address of this method is
 * installed in the native function entry of all native Java methods before
 * they get linked to their actual native methods.
 *
 * \note
 * This method actually never gets called!  The reason is because
 * the interpreter's native entries call NativeLookup::lookup() which
 * throws the exception when the lookup fails.  The exception is then
 * caught and forwarded on the return from NativeLookup::lookup() call
 * before the call to the native function.  This might change in the future.
 */
JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  // We return a bad value here to make sure that the exception is
  // forwarded before we look at the return value.
  THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
}
JNI_END
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj)) #if INCLUDE_JVMCI if (!obj->klass()->has_finalizer()) { return;
} #endif// INCLUDE_JVMCI
assert(oopDesc::is_oop(obj), "must be a valid oop");
assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END
// Return the java.lang.Thread tid for 'thread', or 0 if the thread or its
// Java mirror is unavailable.
jlong SharedRuntime::get_java_tid(JavaThread* thread) {
  assert(thread != NULL, "No thread");
  // Defensive NULL check kept alongside the assert for product builds.
  if (thread == NULL) {
    return 0;
  }
  guarantee(Thread::current() != thread || thread->is_oop_safe(),
            "current cannot touch oops after its GC barrier is detached.");
  oop mirror = thread->threadObj();
  if (mirror == NULL) {
    return 0;
  }
  return java_lang_Thread::thread_id(mirror);
}
/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741.  Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  // Forward to the full overload with the current thread and object size.
  return dtrace_object_alloc(JavaThread::current(), o, o->size());
}
// Convenience overload: derive the object size from the oop itself.
int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
  return dtrace_object_alloc(thread, o, o->size());
}
// Fire the HOTSPOT_OBJECT_ALLOC dtrace probe with the allocating thread's tid,
// the klass name bytes and the allocation size in bytes.
int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
  assert(DTraceAllocProbes, "wrong call");
  Symbol* klass_name = o->klass()->name();
  HOTSPOT_OBJECT_ALLOC(
      get_java_tid(thread),
      (char *) klass_name->bytes(), klass_name->utf8_length(), size * HeapWordSize);
  return 0; // dummy return value, see the sparc/dtrace note on the overload above
}
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode) // for a call current in progress, i.e., arguments has been pushed on stack // put callee has not been invoked yet. Used by: resolve virtual/static, // vtable updates, etc. Caller frame must be compiled.
// NOTE(review): this block appears to splice the header of
// SharedRuntime::find_callee_info onto the tail of a different function
// (presumably extract_attached_method): the local 'caller' used below is never
// declared here, and 'return NULL;' does not match the declared Handle return
// type.  Compare against the upstream sharedRuntime.cpp before relying on it.
Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
JavaThread* current = THREAD;
ResourceMark rm(current);
// last java frame on stack (which includes native call frames)
vframeStream vfst(current, true); // Do not skip and javaCalls
address pc = vfst.frame_pc();
{ // Get call instruction under lock because another thread may be busy patching it.
// NOTE(review): 'caller' is undeclared in this chunk — upstream this lookup is
// performed on the caller's CompiledMethod (vfst.nm()); verify.
CompiledICLocker ic_locker(caller); return caller->attached_method_before_pc(pc);
} return NULL;
}
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call current in progress, i.e., arguments has been pushed on stack
// but callee has not been invoked yet.  Caller frame must be compiled.
// Fixes: several comments shared physical lines with the 'if'/'switch'
// statements they described (commenting the code out); '&reg_map2' was
// mis-encoded as '(R)_map2'; '#ifdef ASSERT' / '#endif' shared lines with code.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int bci = vfst.bci();

  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
    // it attaches statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    }
  }

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be update since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    if (attached_method.is_null()) {
      Method* callee = bytecode.static_target(CHECK_NH);
      if (callee == NULL) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }

    // Retrieve from a compiled argument list
    receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
    assert(oopDesc::is_oop_or_null(receiver()), "");

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = NULL;
    if (attached_method.not_null()) {
      // In case there's resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}
// Determine the method being called at the current call site.
// Fix: '&reg_map' was mis-encoded as '(R)_map' in the sender() call.
methodHandle SharedRuntime::find_callee_method(TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  // We need first to check if any Java activations (compiled, interpreted)
  // exist on the stack since last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(current, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame fr = current->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = methodHandle(current, callinfo.selected_method());
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}
// Resolves a call.
methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != vmClasses::Object_klass()) {
      // If has a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that method is redefined more than 100 times
      // in the middle of resolve. If it is looping here more than 100 times
      // means then there could be a bug here.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}
// This fails if resolution required refilling of IC stubs bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
CompiledMethod* caller_nm, bool is_virtual, bool is_optimized,
Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
StaticCallInfo static_call_info;
CompiledICInfo virtual_call_info;
// Make sure the callee nmethod does not get deoptimized and removed before // we are done patching the code.
CompiledMethod* callee = callee_method->code();
if (callee != NULL) {
assert(callee->is_compiled(), "must be nmethod for patching");
}
if (callee != NULL && !callee->is_in_use()) { // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
callee = NULL;
} #ifdef ASSERT
address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below #endif
// grab lock, check for deoptimization and potentially patch caller
{
CompiledICLocker ml(caller_nm);
// Lock blocks for safepoint during which both nmethods can change state.
// Now that we are ready to patch if the Method* was redefined then // don't update call site and let the caller retry. // Don't update call site if callee nmethod was unloaded or deoptimized. // Don't update call site if callee nmethod was replaced by an other nmethod // which may happen when multiply alive nmethod (tiered compilation) // will be supported. if (!callee_method->is_old() &&
(callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
NoSafepointVerifier nsv; #ifdef ASSERT // We must not try to patch to jump to an already unloaded method. if (dest_entry_point != 0) {
CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee), "should not call unloaded nmethod");
} #endif if (is_virtual) {
CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc()); if (inline_cache->is_clean()) { if (!inline_cache->set_to_monomorphic(virtual_call_info)) { returnfalse;
}
}
} else { if (VM_Version::supports_fast_class_init_checks() &&
invoke_code == Bytecodes::_invokestatic &&
callee_method->needs_clinit_barrier() &&
callee != NULL && callee->is_compiled_by_jvmci()) { returntrue; // skip patching for JVMCI
}
CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc()); if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
} if (ssc->is_clean()) ssc->set(static_call_info);
}
}
} // unlock CompiledICLocker returntrue;
}
// Resolves a call. The compilers generate code for calls that go here // and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, TRAPS) {
JavaThread* current = THREAD;
ResourceMark rm(current);
RegisterMap cbl_map(current,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::include,
RegisterMap::WalkContinuation::skip);
frame caller_frame = current->last_frame().sender(&cbl_map);
CodeBlob* caller_cb = caller_frame.cb();
guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
// determine call info & receiver // note: a) receiver is NULL for static calls // b) an exception is thrown if receiver is NULL for non-static calls
CallInfo call_info;
Bytecodes::Code invoke_code = Bytecodes::_illegal;
Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
methodHandle callee_method(current, call_info.selected_method());
if (invoke_code == Bytecodes::_invokestatic) {
assert(callee_method->method_holder()->is_initialized() ||
callee_method->method_holder()->is_init_thread(current), "invalid class initialization state for invoke_static"); if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) { // In order to keep class initialization check, do not patch call // site for static call when the class is not fully initialized. // Proper check is enforced by call site re-resolution on every invocation. // // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true), // explicit class initialization check is put in nmethod entry (VEP).
assert(callee_method->method_holder()->is_linked(), "must be"); return callee_method;
}
}
// JSR 292 key invariant: // If the resolved method is a MethodHandle invoke target, the call // site must be a MethodHandle call site, because the lambda form might tail-call // leaving the stack in a state unknown to either caller or callee // TODO detune for now but we might need it again // assert(!callee_method->is_compiled_lambda_form() || // caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
// Compute entry points. This might require generation of C2I converter // frames, so we cannot be holding any locks here. Furthermore, the // computation of the entry points is independent of patching the call. We // always return the entry-point, but we only patch the stub if the call has // not been deoptimized. Return values: For a virtual call this is an // (cached_oop, destination address) pair. For a static call/optimized // virtual this is just a destination address.
// Patching IC caches may fail if we run out if transition stubs. // We refill the ic stubs then and try again. for (;;) {
ICRefillVerifier ic_refill_verifier; bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
is_virtual, is_optimized, receiver,
call_info, invoke_code, CHECK_(methodHandle())); if (successful) { return callee_method;
} else {
InlineCacheBuffer::refill_ic_stubs();
}
}
methodHandle callee_method;
JRT_BLOCK
callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL); // Return Method* through TLS
current->set_vm_result_2(callee_method());
JRT_BLOCK_END // return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); return callee_method->verified_code_entry();
JRT_END
// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it.  We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we try and find the callee by normal means a
  // safepoint is possible and have trouble gc'ing the compiled args.
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  // Fixed: "&reg_map" had been corrupted to the "(R)" glyph by HTML-entity
  // mangling ("&reg" -> '®').
  frame caller_frame = stub_frame.sender(&reg_map);
  if (caller_frame.is_interpreted_frame() ||
      caller_frame.is_entry_frame() ||
      caller_frame.is_upcall_stub_frame()) {
    Method* callee = current->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    current->set_vm_result_2(callee);
    current->set_callee_target(NULL);
    if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
      // Bypass class initialization checks in c2i when caller is in native.
      // JNI calls to static methods don't have class initialization checks.
      // Fast class initialization checks are present in c2i adapters and call into
      // SharedRuntime::handle_wrong_method() on the slow path.
      //
      // JVM upcalls may land here as well, but there's a proper check present in
      // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
      // so bypassing it in c2i adapter is benign.
      return callee->get_c2i_no_clinit_check_entry();
    } else {
      return callee->get_c2i_entry();
    }
  }

  // Must be compiled to compiled path which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
    current->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END
// Handle abstract method call
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current)) // Verbose error message for AbstractMethodError. // Get the called method from the invoke bytecode.
vframeStream vfst(current, true);
assert(!vfst.at_end(), "Java frame must exist");
methodHandle caller(current, vfst.method());
Bytecode_invoke invoke(caller, vfst.bci());
DEBUG_ONLY( invoke.verify(); )
// (Extraction residue — a German website disclaimer, not HotSpot source.
//  Preserved here in translation so it does not break compilation:)
// "The information on this web page was carefully compiled to the best of our
//  knowledge. However, neither completeness, nor correctness, nor quality of
//  the provided information is guaranteed.
//  Remark: the coloured syntax rendering is still experimental."