// deoptimization.cpp
// (Stray extraction residue removed: a bare filename line and a garbled
//  heading fragment preceded the license header.)
/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
int Deoptimization::UnrollBlock::size_of_frames() const { // Account first for the adjustment of the initial frame int result = _caller_adjustment; for (int index = 0; index < number_of_frames(); index++) {
result += frame_sizes()[index];
} return result;
}
// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
// The actual reallocation of previously eliminated objects occurs in realloc_objects,
// which is called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  current->inc_in_deopt_handler();

  if (exec_mode == Unpack_exception) {
    // When we get here, a callee has thrown an exception into a deoptimized
    // frame. That throw might have deferred stack watermark checking until
    // after unwinding. So we deal with such deferred requests here.
    StackWatermarkSet::after_unwind(current);
  }

  // NOTE(review): from here on the code references 'chunk', 'thread',
  // 'deoptee', 'map', 'objects', 'realloc_failures', 'compiled_method' and
  // 'deoptimized_objects', none of which are declared above. The entry of
  // fetch_unroll_info appears to have been spliced onto the tail of the
  // object-rematerialization helper during extraction -- the
  // 'return fetch_unroll_info_helper(current, exec_mode); JRT_END' lines and
  // the helper's header are missing. Confirm against the full file.
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread), "a frame can only be deoptimized by the owner thread");

  // The flag return_oop() indicates call sites which return oop
  // in compiled code. Such sites include java method calls,
  // runtime calls (for example, used to allocate new objects/arrays
  // on slow code path) and any other calls generated in compiled code.
  // It is not guaranteed that we can get such information here only
  // by analyzing bytecode in deoptimized frames. This is why this flag
  // is set during method compilation (see Compile::Process_OopMap_Node()).
  // If the previous frame was popped or if we are dispatching an exception,
  // we don't have an oop result.
  bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
  Handle return_value;
  if (save_oop_result) {
    // Reallocation may trigger GC. If deoptimization happened on return from
    // call which returns oop we need to save it since it is not in oopmap.
    oop result = deoptee.saved_oop_result(&map);
    assert(oopDesc::is_oop_or_null(result), "must be oop");
    return_value = Handle(thread, result);
    assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
    if (TraceDeoptimization) {
      tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
      tty->cr();
    }
  }
  if (objects != NULL) {
    if (exec_mode == Deoptimization::Unpack_none) {
      assert(thread->thread_state() == _thread_in_vm, "assumption");
      JavaThread* THREAD = thread; // For exception macros.
      // Clear pending OOM if reallocation fails and return true indicating allocation failure
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
      deoptimized_objects = true;
    } else {
      JavaThread* current = thread; // For JRT_BLOCK
      JRT_BLOCK
      realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
      JRT_END
    }
    // JVMCI-compiled code describes even internal (compiler-synthesized)
    // fields, so only skip them for non-JVMCI code.
    bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
    Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
    if (TraceDeoptimization) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
  if (save_oop_result) {
    // Restore result.
    deoptee.set_saved_oop_result(&map, return_value());
  }
  return realloc_failures;
}
staticvoid restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
frame& deoptee, int exec_mode, bool& deoptimized_objects) {
JavaThread* deoptee_thread = chunk->at(0)->thread();
assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
assert(thread == Thread::current(), "should be");
HandleMark hm(thread); #ifndef PRODUCT bool first = true; #endif// !PRODUCT for (int i = 0; i < chunk->length(); i++) {
compiledVFrame* cvf = chunk->at(i);
assert (cvf->scope() != NULL,"expect only compiled java frames");
GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); if (monitors->is_nonempty()) { bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
exec_mode, realloc_failures);
deoptimized_objects = deoptimized_objects || relocked; #ifndef PRODUCT if (PrintDeoptimizationDetails) {
ResourceMark rm;
stringStream st; for (int j = 0; j < monitors->length(); j++) {
MonitorInfo* mi = monitors->at(j); if (mi->eliminated()) { if (first) {
first = false;
st.print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
} if (exec_mode == Deoptimization::Unpack_none) {
ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor(); if (monitor != NULL && monitor->object() == mi->owner()) {
st.print_cr(" object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner())); continue;
}
} if (mi->owner_is_scalar_replaced()) {
Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
st.print_cr(" failed reallocation for klass %s", k->external_name());
} else {
st.print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
}
}
}
tty->print_raw(st.freeze());
} #endif// !PRODUCT
}
}
}
// Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI. // The given vframes cover one physical frame. bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool& realloc_failures) {
frame deoptee = chunk->at(0)->fr();
JavaThread* deoptee_thread = chunk->at(0)->thread();
CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
RegisterMap map(chunk->at(0)->register_map()); bool deoptimized_objects = false;
// Reallocate the non-escaping objects and restore their fields. if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
|| EliminateAutoBox || EnableVectorAggressiveReboxing)) {
realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects);
}
// MonitorInfo structures used in eliminate_locks are not GC safe.
NoSafepointVerifier no_safepoint;
// Now relock objects if synchronization on them was eliminated. if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
} return deoptimized_objects;
} #endif// COMPILER2_OR_JVMCI
// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
//
// Builds the UnrollBlock describing the skeletal interpreter frames that will
// replace the deoptimized compiled frame: walks the inlined scopes, optionally
// rematerializes scalar-replaced objects, creates the vframeArray, and
// computes per-frame sizes/pcs and the caller-frame adjustment.
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
  // When we get here we are about to unwind the deoptee frame. In order to
  // catch not yet safe to use frames, the following stack watermark barrier
  // poll will make such frames safe to use.
  StackWatermarkSet::before_unwind(current);

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(current);
  assert(current->deopt_mark() == NULL, "Pending deopt!");
  current->set_deopt_mark(dmark);

  frame stub_frame = current->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(current,
                  RegisterMap::UpdateMap::include,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  RegisterMap dummy_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(current->deopt_compiled_method() == NULL, "Pending deopt!");
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  current->set_deopt_compiled_method(cm);

  if (VerifyStack) {
    current->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame. This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, current);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));

  // NOTE(review): 'jvmci_enabled' and 'realloc_failures' are used below but
  // never declared in the visible text, and the '#endif // COMPILER2_OR_JVMCI'
  // that follows has no visible matching '#if'. Declarations (upstream:
  // 'bool realloc_failures = false;' and the jvmci_enabled flag under
  // '#if COMPILER2_OR_JVMCI') appear to have been lost in extraction.
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
                                       || EliminateAutoBox || EnableVectorAggressiveReboxing )) {
    bool unused;
    realloc_failures = rematerialize_objects(current, exec_mode, cm, deoptee, map, chunk, unused);
  }
#endif // COMPILER2_OR_JVMCI

  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects. If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  // Locks may be rebaised in a safepoint.
  NoSafepointVerifier no_safepoint;

  ScopeDesc* trap_scope = chunk->at(0)->scope();
  Handle exceptionObject;
  if (trap_scope->rethrow_exception()) {
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
    }
#endif // !PRODUCT
    GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
    guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
    // The exception oop to rethrow is the topmost expression-stack value of
    // the trapping scope.
    ScopeValue* topOfStack = expressions->top();
    exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
    guarantee(exceptionObject() != NULL, "exception oop can not be null");
  }

  vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
#if COMPILER2_OR_JVMCI
  if (realloc_failures) {
    // This destroys all ScopedValue bindings.
    current->clear_scopedValueBindings();
    pop_frames_failed_reallocs(current, array);
  }
#endif

  // Now that the vframeArray has been created if we have any deferred local writes
  // added by jvmti then we can free up that structure as the data is now in the
  // vframeArray

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && current->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(current->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than number of arguments in the callee when method
  // handles are used. If the caller is interpreted get the real
  // value so that the proper amount of space can be added to it's
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method(current, deopt_sender.interpreter_frame_method());
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to it's caller). Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
  // so things look a little strange in this loop.
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable
    // The correct pc will be set when the skeleton frame is completely filled out
    // The final pc we store in the loop is wrong and will be overwritten below
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

  // NOTE(review): upstream updates callee_parameters/callee_locals here and
  // closes this loop with '}'; the closing brace is not present in the visible
  // text (likely lost in extraction), leaving the braces unbalanced. Left
  // as-is pending verification against the full file.

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).
  if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  // NOTE(review): 'elseif' below is a glued token ('else if') -- extraction
  // artifact, left byte-identical here.
  } elseif (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }

  // If the sender is deoptimized the we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
    assert(current->has_pending_exception(), "should have thrown OOME");
    current->set_exception_oop(current->pending_exception());
    current->clear_pending_exception();
    exec_mode = Unpack_exception;
  }
#if INCLUDE_JVMCI
  if (current->frames_to_pop_failed_realloc() > 0) {
    current->set_pending_monitorenter(false);
  }
#endif

  // NOTE(review): 'return_type' is used below but not declared in the visible
  // text (upstream computes it from the root vframe's call site); another
  // apparent extraction loss.
  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type,
                                      exec_mode);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}
// Called to cleanup deoptimization data structures in normal case // after unpacking to stack and when stack overflow error occurs void Deoptimization::cleanup_deopt_info(JavaThread *thread,
vframeArray *array) {
// Get array if coming from exception if (array == NULL) {
array = thread->vframe_array_head();
}
thread->set_vframe_array_head(NULL);
// Free the previous UnrollBlock
vframeArray* old_array = thread->vframe_array_last();
thread->set_vframe_array_last(array);
if (JvmtiExport::can_pop_frame()) { // Regardless of whether we entered this routine with the pending // popframe condition bit set, we should always clear it now
thread->clear_popframe_condition();
}
// unpack_frames() is called at the end of the deoptimization handler // and (in C2) at the end of the uncommon trap handler. Note this fact // so that an asynchronous stack walker can work again. This counter is // incremented at the beginning of fetch_unroll_info() and (in C2) at // the beginning of uncommon_trap().
thread->dec_in_deopt_handler();
}
// Moved from cpu directories because none of the cpus has callee save values. // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp. void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in // the days we had adapter frames. When we deoptimize a situation where a // compiled caller calls a compiled caller will have registers it expects // to survive the call to the callee. If we deoptimize the callee the only // way we can restore these registers is to have the oldest interpreter // frame that we create restore these values. That is what this routine // will accomplish.
// At the moment we have modified c2 to not have any callee save registers // so this problem does not exist and this routine is just a place holder.
assert(f->is_interpreted_frame(), "must be interpreted");
}
#ifndef PRODUCT staticbool falls_through(Bytecodes::Code bc) { switch (bc) { // List may be incomplete. Here we really only care about bytecodes where compiled code // can deoptimize. case Bytecodes::_goto: case Bytecodes::_goto_w: case Bytecodes::_athrow: returnfalse; default: returntrue;
}
} #endif
// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
assert(thread == JavaThread::current(), "pre-condition");
// We are already active in the special DeoptResourceMark any ResourceObj's we // allocate will be freed at the end of the routine.
// JRT_LEAF methods don't normally allocate handles and there is a // NoHandleMark to enforce that. It is actually safe to use Handles // in a JRT_LEAF method, and sometimes desirable, but to do so we // must use ResetNoHandleMark to bypass the NoHandleMark, and // then use a HandleMark to ensure any Handles we do create are // cleaned up in this scope.
ResetNoHandleMark rnhm;
HandleMark hm(thread);
// Since the frame to unpack is the top frame of this thread, the vframe_array_head // must point to the vframeArray for the unpack frame.
vframeArray* array = thread->vframe_array_head();
UnrollBlock* info = array->unroll_block();
// We set the last_Java frame. But the stack isn't really parsable here. So we // clear it to make sure JFR understands not to try and walk stacks from events // in here.
intptr_t* sp = thread->frame_anchor()->last_Java_sp();
thread->frame_anchor()->set_last_Java_sp(NULL);
// Unpack the interpreter frames and any adapter frame (c2 only) we might create.
array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
thread->frame_anchor()->set_last_Java_sp(sp);
BasicType bt = info->return_type();
// If we have an exception pending, claim that the return type is an oop // so the deopt_blob does not overwrite the exception_oop.
if (exec_mode == Unpack_exception)
bt = T_OBJECT;
// Cleanup thread deopt data
cleanup_deopt_info(thread, array);
#ifndef PRODUCT if (VerifyStack) {
ResourceMark res_mark; // Clear pending exception to not break verification code (restored afterwards)
PreserveExceptionMark pm(thread);
thread->validate_frame_layout();
// Verify that the just-unpacked frames match the interpreter's // notions of expression stack and locals
vframeArray* cur_array = thread->vframe_array_last();
RegisterMap rm(thread,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::include,
RegisterMap::WalkContinuation::skip);
rm.set_include_argument_oops(false); bool is_top_frame = true; int callee_size_of_parameters = 0; int callee_max_locals = 0; for (int i = 0; i < cur_array->frames(); i++) {
vframeArrayElement* el = cur_array->element(i);
frame* iframe = el->iframe();
guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
// Get the oop map for this bci
InterpreterOopMap mask; int cur_invoke_parameter_size = 0; bool try_next_mask = false; int next_mask_expression_stack_size = -1; int top_frame_expression_stack_adjustment = 0;
methodHandle mh(thread, iframe->interpreter_frame_method());
OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
BytecodeStream str(mh, iframe->interpreter_frame_bci()); int max_bci = mh->code_size(); // Get to the next bytecode if possible
assert(str.bci() < max_bci, "bci in interpreter frame out of bounds"); // Check to see if we can grab the number of outgoing arguments // at an uncommon trap for an invoke (where the compiler // generates debug info before the invoke has executed)
Bytecodes::Code cur_code = str.next();
Bytecodes::Code next_code = Bytecodes::_shouldnotreachhere; if (Bytecodes::is_invoke(cur_code)) {
Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
cur_invoke_parameter_size = invoke.size_of_parameters(); if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
callee_size_of_parameters++;
}
} if (str.bci() < max_bci) {
next_code = str.next(); if (next_code >= 0) { // The interpreter oop map generator reports results before // the current bytecode has executed except in the case of // calls. It seems to be hard to tell whether the compiler // has emitted debug information matching the "state before" // a given bytecode or the state after, so we try both if (!Bytecodes::is_invoke(cur_code) && falls_through(cur_code)) { // Get expression stack size for the next bytecode
InterpreterOopMap next_mask;
OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
next_mask_expression_stack_size = next_mask.expression_stack_size(); if (Bytecodes::is_invoke(next_code)) {
Bytecode_invoke invoke(mh, str.bci());
next_mask_expression_stack_size += invoke.size_of_parameters();
} // Need to subtract off the size of the result type of // the bytecode because this is not described in the // debug info but returned to the interpreter in the TOS // caching register
BasicType bytecode_result_type = Bytecodes::result_type(cur_code); if (bytecode_result_type != T_ILLEGAL) {
top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
}
assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be positive");
try_next_mask = true;
}
}
}
// Verify stack depth and oops in frame // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc) if (!( /* SPARC */
(iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) || /* x86 */
(iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
(try_next_mask &&
(iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
top_frame_expression_stack_adjustment))) ||
(is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
(is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
(iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
)) {
{ // Print out some information that will help us debug the problem
tty->print_cr("Wrong number of expression stack elements during deoptimization");
tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
tty->print_cr(" Current code %s", Bytecodes::name(cur_code)); if (try_next_mask) {
tty->print_cr(" Next code %s", Bytecodes::name(next_code));
}
tty->print_cr(" Fabricated interpreter frame had %d expression stack elements",
iframe->interpreter_frame_expression_stack_size());
tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
tty->print_cr(" try_next_mask = %d", try_next_mask);
tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters);
tty->print_cr(" callee_max_locals = %d", callee_max_locals);
tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
tty->print_cr(" exec_mode = %d", exec_mode);
tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
tty->print_cr(" Interpreted frames:"); for (int k = 0; k < cur_array->frames(); k++) {
vframeArrayElement* el = cur_array->element(k);
tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
}
cur_array->print_on_2(tty);
}
guarantee(false, "wrong number of expression stack elements during deopt");
}
VerifyOopClosure verify;
iframe->oops_interpreted_do(&verify, &rm, false);
callee_size_of_parameters = mh->size_of_parameters();
callee_max_locals = mh->max_locals();
is_top_frame = false;
}
} #endif// !PRODUCT
return bt;
JRT_END
class DeoptimizeMarkedClosure : public HandshakeClosure { public:
DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {} void do_thread(Thread* thread) {
JavaThread* jt = JavaThread::cast(thread);
jt->deoptimize_marked_methods();
}
};
// NOTE(review): this is a fragment -- 'nmethod_only' is not declared in the
// visible text and the header of the enclosing function is missing (likely
// lost in extraction). In the raw paste the 'if' line below had even been
// fused onto the end of the comment line; the line break is restored here.
// Confirm against the full file before editing further.
// Make the dependent methods not entrant
if (nmethod_only != NULL) {
  // Deoptimize just this one nmethod: mark it, make it non-entrant, and
  // record it as deoptimized in the code cache.
  nmethod_only->mark_for_deoptimization();
  nmethod_only->make_not_entrant();
  CodeCache::make_nmethod_deoptimized(nmethod_only);
} else {
  // No specific nmethod given: process everything previously marked.
  CodeCache::make_marked_nmethods_deoptimized();
}
// NOTE(review): this loop appears to be a fragment of an object-reallocation
// routine -- 'objects', 'fr', 'reg_map' and 'THREAD' are not declared in the
// visible text, the non-JVMCI allocation path for instance klasses and the
// array-klass branches are absent, and the loop's closing brace is missing,
// so the braces below do not balance. Token stream preserved as-is; confirm
// against the full file before editing.
for (int i = 0; i < objects->length(); i++) {
  assert(objects->at(i)->is_object(), "invalid debug information");
  ObjectValue* sv = (ObjectValue*) objects->at(i);
  // Recover the Klass of the scalar-replaced object from its debug info.
  Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
  oop obj = NULL;

  if (k->is_instance_klass()) {
#if INCLUDE_JVMCI
    CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
    if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
      AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
      obj = get_cached_box(abv, fr, reg_map, THREAD);
      if (obj != NULL) {
        // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
        abv->set_cached(true);
      }
    }
#endif // INCLUDE_JVMCI

    assert(sv->value().is_null(), "redundant reallocation");
    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
    CLEAR_PENDING_EXCEPTION;
    sv->set_value(obj);
  }
#if INCLUDE_JVMCI
/**
 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
 * we need to somehow be able to recover the actual kind to be able to write the correct
 * amount of bytes.
 * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 * the entries at index n + 1 to n + i are 'markers'.
 * For example, if we were writing a short at index 4 of a byte array of size 8, the
 * expected form of the array would be:
 *
 * {b0, b1, b2, b3, INT, marker, b6, b7}
 *
 * Thus, in order to get back the size of the entry, we simply need to count the number
 * of marked entries
 *
 * @param virtualArray the virtualized byte array
 * @param i index of the virtual entry we are recovering
 * @return The number of bytes the entry spans
 */
// FIX: 'staticint' was a glued token (missing space) in the mangled original.
static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
  int index = i;
  // Count the marker entries that pad out the virtual slot.
  while (++index < virtualArray->field_size() &&
         virtualArray->field_at(index)->is_marker()) {}
  return index - i;
}
/**
 * If there was a guarantee for byte array to always start aligned to a long, we could
 * do a simple check on the parity of the index. Unfortunately, that is not always the
 * case. Thus, we check alignment of the actual address we are writing to.
 * In the unlikely case index 0 is 5-aligned for example, it would then be possible to
 * write a long to index 3.
 *
 * @param obj the byte array being written to
 * @param index byte index of the write
 * @param expected_alignment alignment (in bytes) the write address must satisfy
 * @return the address of byte 'index' within 'obj', asserted to be aligned
 */
static jbyte* check_alignment_get_addr(typeArrayOop obj, int index, int expected_alignment) {
  jbyte* res = obj->byte_at_addr(index);
  assert((((intptr_t) res) % expected_alignment) == 0, "Non-aligned write");
  return res;
}
// restore elements of an eliminated type array void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { int index = 0;
intptr_t val;
for (int i = 0; i < sv->field_size(); i++) {
StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); switch(type) { case T_LONG: case T_DOUBLE: {
assert(value->type() == T_INT, "Agreement.");
StackValue* low =
StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); #ifdef _LP64
jlong res = (jlong)low->get_int(); #else
jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); #endif
obj->long_at_put(index, res); break;
}
// Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. case T_INT: case T_FLOAT: { // 4 bytes.
assert(value->type() == T_INT, "Agreement."); bool big_value = false; if (i + 1 < sv->field_size() && type == T_INT) { if (sv->field_at(i)->is_location()) {
Location::Type type = ((LocationValue*) sv->field_at(i))->location().type(); if (type == Location::dbl || type == Location::lng) {
big_value = true;
}
} elseif (sv->field_at(i)->is_constant_int()) {
ScopeValue* next_scope_field = sv->field_at(i + 1); if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
big_value = true;
}
}
}
if (big_value) {
StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); #ifdef _LP64
jlong res = (jlong)low->get_int(); #else
jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); #endif
obj->int_at_put(index, (jint)*((jint*)&res));
obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
} else {
val = value->get_int();
obj->int_at_put(index, (jint)*((jint*)&val));
} break;
}
case T_SHORT:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->short_at_put(index, (jshort)*((jint*)&val)); break;
case T_CHAR:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->char_at_put(index, (jchar)*((jint*)&val)); break;
case T_BYTE: {
assert(value->type() == T_INT, "Agreement."); // The value we get is erased as a regular int. We will need to find its actual byte count 'by hand'.
val = value->get_int(); #if INCLUDE_JVMCI int byte_count = count_number_of_bytes_for_entry(sv, i);
byte_array_put(obj, val, index, byte_count); // According to byte_count contract, the values from i + 1 to i + byte_count are illegal values. Skip.
i += byte_count - 1; // Balance the loop counter.
index += byte_count; // index has been updated so continue at top of loop continue; #else
obj->byte_at_put(index, (jbyte)*((jint*)&val)); break; #endif// INCLUDE_JVMCI
}
case T_BOOLEAN: {
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->bool_at_put(index, (jboolean)*((jint*)&val)); break;
}
default:
ShouldNotReachHere();
}
index++;
}
}
// restore fields of an eliminated object array void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) { for (int i = 0; i < sv->field_size(); i++) {
StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
assert(value->type() == T_OBJECT, "object element expected");
obj->obj_at_put(i, value->get_obj()());
}
}
class ReassignedField { public: int _offset;
BasicType _type; public:
ReassignedField() {
_offset = 0;
_type = T_ILLEGAL;
}
};
// Restore fields of an eliminated instance object using the same field order // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true) staticint reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
InstanceKlass* ik = klass; while (ik != NULL) { for (AllFieldStream fs(ik); !fs.done(); fs.next()) { if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
ReassignedField field;
field._offset = fs.offset();
field._type = Signature::basic_type(fs.signature());
fields->append(field);
}
}
ik = ik->superklass();
}
fields->sort(compare); for (int i = 0; i < fields->length(); i++) {
intptr_t val;
ScopeValue* scope_field = sv->field_at(svIndex);
StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field); int offset = fields->at(i)._offset;
BasicType type = fields->at(i)._type; switch (type) { case T_OBJECT: case T_ARRAY:
assert(value->type() == T_OBJECT, "Agreement.");
obj->obj_field_put(offset, value->get_obj()()); break;
// Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. case T_INT: case T_FLOAT: { // 4 bytes.
assert(value->type() == T_INT, "Agreement."); bool big_value = false; if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) { if (scope_field->is_location()) {
Location::Type type = ((LocationValue*) scope_field)->location().type(); if (type == Location::dbl || type == Location::lng) {
big_value = true;
}
} if (scope_field->is_constant_int()) {
ScopeValue* next_scope_field = sv->field_at(svIndex + 1); if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
big_value = true;
}
}
}
if (big_value) {
i++;
assert(i < fields->length(), "second T_INT field needed");
assert(fields->at(i)._type == T_INT, "T_INT field needed");
} else {
val = value->get_int();
obj->int_field_put(offset, (jint)*((jint*)&val)); break;
}
} /* no break */
case T_LONG: case T_DOUBLE: {
assert(value->type() == T_INT, "Agreement.");
StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex)); #ifdef _LP64
jlong res = (jlong)low->get_int(); #else
jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); #endif
obj->long_field_put(offset, res); break;
}
case T_SHORT:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->short_field_put(offset, (jshort)*((jint*)&val)); break;
case T_CHAR:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->char_field_put(offset, (jchar)*((jint*)&val)); break;
case T_BYTE:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->byte_field_put(offset, (jbyte)*((jint*)&val)); break;
case T_BOOLEAN:
assert(value->type() == T_INT, "Agreement.");
val = value->get_int();
obj->bool_field_put(offset, (jboolean)*((jint*)&val)); break;
// restore fields of all eliminated objects and arrays void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) { for (int i = 0; i < objects->length(); i++) {
ObjectValue* sv = (ObjectValue*) objects->at(i);
Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
Handle obj = sv->value();
assert(obj.not_null() || realloc_failures, "reallocation was missed"); #ifndef PRODUCT if (PrintDeoptimizationDetails) {
tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
} #endif// !PRODUCT
if (obj.is_null()) { continue;
}
#if INCLUDE_JVMCI // Don't reassign fields of boxes that came from a cache. Caches may be in CDS. if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) { continue;
} #endif// INCLUDE_JVMCI #ifdef COMPILER2 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
ScopeValue* payload = sv->field_at(0); if (payload->is_location() &&
payload->as_LocationValue()->location().type() == Location::vector) { #ifndef PRODUCT if (PrintDeoptimizationDetails) {
tty->print_cr("skip field reassignment for this vector - it should be assigned already"); if (Verbose) {
Handle obj = sv->value();
k->oop_print_on(obj(), tty);
}
} #endif// !PRODUCT continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
} // Else fall-through to do assignment for scalar-replaced boxed vector representation // which could be restored after vector object allocation.
} #endif/* !COMPILER2 */ if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
} elseif (k->is_typeArray_klass()) {
TypeArrayKlass* ak = TypeArrayKlass::cast(k);
reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
} elseif (k->is_objArray_klass()) {
reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
}
}
}
// relock objects for which synchronization was eliminated bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) { bool relocked_objects = false; for (int i = 0; i < monitors->length(); i++) {
MonitorInfo* mon_info = monitors->at(i); if (mon_info->eliminated()) {
assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
relocked_objects = true; if (!mon_info->owner_is_scalar_replaced()) {
Handle obj(thread, mon_info->owner());
markWord mark = obj->mark(); if (exec_mode == Unpack_none) { if (mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) { // With exec_mode == Unpack_none obj may be thread local and locked in // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
markWord dmw = mark.displaced_mark_helper();
mark.locker()->set_displaced_header(markWord::encode((BasicLock*) NULL));
obj->set_mark(dmw);
} if (mark.has_monitor()) { // defer relocking if the deoptee thread is currently waiting for obj
ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor(); if (waiting_monitor != NULL && waiting_monitor->object() == obj()) {
assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
mon_info->lock()->set_displaced_header(markWord::unused_mark());
JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread); continue;
}
}
}
BasicLock* lock = mon_info->lock();
ObjectSynchronizer::enter(obj, lock, deoptee_thread);
assert(mon_info->owner()->is_locked(), "object must be locked now");
}
}
} return relocked_objects;
} #endif// COMPILER2_OR_JVMCI
// Register map for next frame (used for stack crawl). We capture // the state of the deopt'ing frame's caller. Thus if we need to // stuff a C2I adapter we can properly fill in the callee-save // register locations.
frame caller = fr.sender(reg_map); int frame_size = caller.sp() - fr.sp();
frame sender = caller;
// Since the Java thread being deoptimized will eventually adjust it's own stack, // the vframeArray containing the unpacking information is allocated in the C heap. // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
--> --------------------
--> maximum size reached
--> --------------------
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.26Bemerkung:
Wie Sie bei der Firma Beratungs- und Dienstleistungen beauftragen können
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.