/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Finish a patching stub: pad with nops so a jump can later be patched in
// atomically, force re-execution of the patched bytecode on deoptimization,
// install the stub, and (in debug builds) sanity-check that the patch kind
// matches the bytecode being compiled.
// Fixes vs. previous revision: the leading comment had swallowed the `while`
// loop (code after `//` on the same line), and `elseif` is not a C++ keyword
// — restored to `else if`.
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may transiently
  // create the illegal instruction sequence.
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
  info->set_force_reexecute();
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
// The unwind handler label may be unbound if this destructor is invoked
// because of a bail-out.  Reset it here to avoid an assertion.
LIR_Assembler::~LIR_Assembler() {
  _unwind_handler_entry.reset();
}
// To bang the stack of this compiled method we use the stack size // that the interpreter would need in case of a deoptimization. This // removes the need to bang the stack in the deoptimization blob which // in turn simplifies stack overflow handling. int LIR_Assembler::bang_size_in_bytes() const { return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}
// Record the PC offset of every exception handler entry and emit any
// not-yet-emitted adapter code for handlers that need it.
// Fix vs. previous revision: the "// entry code not emitted yet" comment had
// swallowed the inner `if`, making the adapter-emission path unconditional
// and unbalancing the braces; structure restored.
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
// Assemble every block of the HIR in order, optionally dumping the LIR first,
// and flush any buffered debug information once all blocks are emitted.
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  const int block_count = hir->length();
  for (int idx = 0; idx < block_count; idx++) {
    emit_block(hir->at(idx));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}
// Emit machine code for one basic block: align backward-branch targets,
// record exception-entry PC offsets, then assemble each LIR op.
// NOTE(review): this region appears to be a corrupted merge of emit_block()
// with the interior of emit_lir_list() and check_no_unbound_labels():
// 'list' below is used without a visible declaration, several '// ...'
// comments have swallowed the code that follows them on the same line
// (e.g. the exception_entry_flag 'if' and the CommentedAssembly 'if'),
// '#ifndef PRODUCT' shares a line with executable code, and the brace /
// '#endif' balance does not close. Reconcile against the upstream
// c1_LIRAssembler.cpp before relying on this text; code is left byte-
// identical here on purpose.
void LIR_Assembler::emit_block(BlockBegin* block) { if (block->is_set(BlockBegin::backward_branch_target_flag)) {
align_backward_branch_target();
}
// if this block is the start of an exception handler, record the // PC offset of the first instruction for later construction of // the ExceptionHandlerTable if (block->is_set(BlockBegin::exception_entry_flag)) {
// NOTE(review): the 'if' guard above is trapped inside the comment, so the
// following store currently runs unconditionally and the '}' is unmatched.
block->set_exception_handler_pco(code_offset());
}
// NOTE(review): 'list' is undeclared in this scope — presumably the
// LIR_List* parameter of emit_lir_list(); confirm against upstream.
int n = list->length(); for (int i = 0; i < n; i++) {
LIR_Op* op = list->at(i);
check_codespace();
CHECK_BAILOUT();
#ifndef PRODUCT if (CommentedAssembly) { // Don't record out every op since that's too verbose. Print // branches since they include block and stub names. Also print // patching moves since they generate funny looking code. if (op->code() == lir_branch ||
(op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
(op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
stringStream st;
op->print_on(&st);
_masm->block_comment(st.freeze());
}
} if (PrintLIRWithAssembly) { // print out the LIR operation followed by the resulting assembly
list->at(i)->print(); tty->cr();
} #endif/* PRODUCT */
op->emit_code(this);
if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
process_debug_info(op);
}
// NOTE(review): the loop below belongs to check_no_unbound_labels() in the
// upstream file; it verifies every branch-target block's label was bound.
for (int i = 0; i < _branch_target_blocks.length() - 1; i++) { if (!_branch_target_blocks.at(i)->label()->is_bound()) {
tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
assert(false, "unbound label");
}
}
} #endif
// Select the debug-info state for an instruction: a StateSplit carries its
// own state(); any other instruction falls back to state_before().
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* split = ins->as_StateSplit();
  return (split != NULL) ? split->state() : ins->state_before();
}
// Buffer non-safepoint debug information for the op's source instruction in
// _pending_non_safepoint / _pending_non_safepoint_offset, flushing the
// pending record (record_non_safepoint_debug_info()) only when a different
// state appears — consecutive ops sharing a state yield a single record.
// Fix vs. previous revision: the "// Got some old debug info..." comment had
// swallowed the `if (debug_info(_pending_non_safepoint) == vstack) {` test,
// making the store-and-return below it unconditional and unbalancing braces;
// structure restored.
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    // Same source as the pending record: just extend it to this pc.
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      // Same state as before: keep the pending record, move its offset.
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
// Fix vs. previous revision: the comment above had been merged onto one line
// and swallowed the function signature; the definition is restored.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  // First advance a probe n steps toward the caller; if it falls off the
  // chain, n exceeds the scope depth.
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  // Walk both pointers in lockstep until the probe reaches the oldest state;
  // s then points at the n-th oldest state and bci_result holds the bci of
  // the state just younger than it.
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
// Flush the pending non-safepoint record: describe every scope of the
// pending instruction's state, oldest to youngest, at the buffered pc.
// NOTE(review): this region appears to be a corrupted merge of
// record_non_safepoint_debug_info() with the tail of a call-emission routine
// (emit_call in the upstream file): 'op' below is used without a visible
// declaration, the DebugInformationRecorder local that 'debug_info->' on the
// describe_scope line requires is missing, the "// Visit scopes..." comment
// has swallowed its 'for' header, and '#ifdefined' is not valid preprocessor
// syntax ('#if defined' intended). Code left byte-identical; reconcile
// against upstream before use.
void LIR_Assembler::record_non_safepoint_debug_info() { int pc_offset = _pending_non_safepoint_offset;
ValueStack* vstack = debug_info(_pending_non_safepoint); int bci = vstack->bci();
// Visit scopes from oldest to youngest. for (int n = 0; ; n++) { int s_bci = bci;
ValueStack* s = nth_oldest(vstack, n, s_bci); if (s == NULL) break;
IRScope* scope = s->scope(); //Always pass false for reexecute since these ScopeDescs are never used for deopt
methodHandle null_mh;
debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
}
// NOTE(review): everything from here down references 'op' and belongs to a
// call-emission routine whose header is missing from this text.
// must align calls sites, otherwise they can't be updated atomically
align_call(op->code());
if (CodeBuffer::supports_shared_stubs() && op->method()->can_be_statically_bound()) { // Calls of the same statically bound method can share // a stub to the interpreter.
CodeBuffer::csize_t call_offset = pc() - _masm->code()->insts_begin();
_masm->code()->shared_stub_to_interp_for(op->method(), call_offset);
} else {
emit_static_call_stub();
}
CHECK_BAILOUT();
switch (op->code()) { case lir_static_call: case lir_dynamic_call:
call(op, relocInfo::static_call_type); break; case lir_optvirtual_call:
call(op, relocInfo::opt_virtual_call_type); break; case lir_icvirtual_call:
ic_call(op); break; default:
fatal("unexpected op code: %s", op->name()); break;
}
// JSR 292 // Record if this method has MethodHandle invokes. if (op->is_method_handle_invoke()) {
compilation()->set_has_method_handle_invokes(true);
}
#ifdefined(IA32) && defined(COMPILER2) // C2 leave fpu stack dirty clean it if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) { int i; for ( i = 1; i <= 7 ; i++ ) {
ffree(i);
} if (!op->result_opr()->is_float_kind()) {
ffree(0);
}
} #endif// IA32 && COMPILER2
}
// NOTE(review): these are the interior cases of an op-emission switch —
// presumably LIR_Assembler::emit_op0(LIR_Op0* op) in the upstream file —
// whose function header and 'switch (op->code()) {' opener are missing from
// this text (confirm against upstream). Code left byte-identical.
// lir_std_entry: record entry offsets, emit the ic check / verified entry /
// optional clinit barrier, then build the frame.
case lir_std_entry: // init offsets
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
_masm->align(CodeEntryAlignment); if (needs_icache(compilation()->method())) {
check_icache();
}
offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
_masm->verified_entry(compilation()->directive()->BreakAtExecuteOption); if (needs_clinit_barrier_on_entry(compilation()->method())) {
clinit_barrier(compilation()->method());
}
build_frame();
offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset()); break;
// lir_osr_entry: record the OSR entry offset and emit its prologue.
case lir_osr_entry:
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
osr_entry(); break;
// NOTE(review): '#ifdef IA32 case ...' shares a line with code; the
// preprocessor consumes the whole line, so this case is currently dead text.
#ifdef IA32 case lir_fpop_raw:
fpop(); break; #endif// IA32
case lir_breakpoint:
breakpoint(); break;
// Memory-barrier ops delegate to the platform-specific membar emitters.
case lir_membar:
membar(); break;
case lir_membar_acquire:
membar_acquire(); break;
case lir_membar_release:
membar_release(); break;
case lir_membar_loadload:
membar_loadload(); break;
case lir_membar_storestore:
membar_storestore(); break;
case lir_membar_loadstore:
membar_loadstore(); break;
case lir_membar_storeload:
membar_storeload(); break;
case lir_get_thread:
get_thread(op->result_opr()); break;
case lir_on_spin_wait:
on_spin_wait(); break;
default:
ShouldNotReachHere(); break;
}
}
// Dispatch a two-operand LIR op to the matching platform emitter
// (compare, shift, arithmetic, logic, intrinsic, throw, atomic, ...).
// NOTE(review): this function is truncated — the switch's default case and
// the closing braces are missing from this text (the file tail was replaced
// by unrelated prose). Code left byte-identical; restore the tail from the
// upstream c1_LIRAssembler.cpp.
void LIR_Assembler::emit_op2(LIR_Op2* op) { switch (op->code()) { case lir_cmp: if (op->info() != NULL) {
assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(), "shouldn't be codeemitinfo for non-address operands");
add_debug_info_for_null_check_here(op->info()); // exception possible
}
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op); break;
case lir_cmp_l2i: case lir_cmp_fd2i: case lir_ucmp_fd2i:
comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); break;
// Shifts: constant shift amounts take the jint fast path, variable amounts
// need a temp register.
case lir_shl: case lir_shr: case lir_ushr: if (op->in_opr2()->is_constant()) {
shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
} else {
shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
} break;
case lir_add: case lir_sub: case lir_mul: case lir_div: case lir_rem:
assert(op->fpu_pop_count() < 2, "");
arith_op(
op->code(),
op->in_opr1(),
op->in_opr2(),
op->result_opr(),
op->info(),
op->fpu_pop_count() == 1); break;
case lir_abs: case lir_sqrt: case lir_tan: case lir_log10:
intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op); break;
case lir_neg:
negate(op->in_opr1(), op->result_opr(), op->in_opr2()); break;
case lir_logic_and: case lir_logic_or: case lir_logic_xor:
logic_op(
op->code(),
op->in_opr1(),
op->in_opr2(),
op->result_opr()); break;
case lir_throw:
throw_op(op->in_opr1(), op->in_opr2(), op->info()); break;
case lir_xadd: case lir_xchg:
atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr()); break;
// NOTE(review): the following trailing text is extraneous website boilerplate
// (German) that was accidentally appended to this source file; it is not code
// and, left as raw prose, would not compile. Preserved here as a comment,
// translated: "The information on this website has been carefully compiled to
// the best of our knowledge. However, neither completeness, nor correctness,
// nor quality of the information provided is guaranteed. Note: the colored
// syntax display is still experimental."