/* * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Size, in bytes, of the fixed frame this compilation will build,
// as laid out by the frame map.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}
// Inline cache check: done before the frame is built. // The inline cached class is in Z_inline_cache(Z_R9). // We fetch the class of the receiver and compare it with the cached class. // If they do not match we jump to the slow case. int LIR_Assembler::check_icache() { Register receiver = receiverOpr()->as_register(); int offset = __ offset();
// The code offset is captured *before* the check is emitted so the caller
// can locate the IC-check site; do not reorder these two statements.
__ inline_cache_check(receiver, Z_inline_cache); return offset;
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
// NOTE(review): this definition is truncated in this chunk -- the next
// visible line starts a new function without this one's closing brace.
// Visible part: a class-initialization barrier is being set up for the
// holder of 'method'; initialization must already have been started.
assert(!method->holder()->is_not_initialized(), "initialization should have been started");
// Fast-path branch target and a scratch register for the holder's klass.
Label L_skip_barrier; Register klass = Z_R1_scratch;
void LIR_Assembler::osr_entry() { // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp): // // 1. Create a new compiled activation. // 2. Initialize local variables in the compiled activation. The expression stack must be empty // at the osr_bci; it is not initialized. // 3. Jump to the continuation address in compiled code to resume execution.
// OSR entry point
// Record where the OSR entry starts so the runtime can jump here.
offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
BlockBegin* osr_entry = compilation()->hir()->osr_entry();
ValueStack* entry_state = osr_entry->end()->state(); int number_of_locks = entry_state->locks_size();
// Create a frame for the compiled activation.
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is // // locals[nlocals-1..0] // monitors[number_of_locks-1..0] // // Locals is a direct copy of the interpreter frame so in the osr buffer // the first slot in the local array is the last local from the interpreter // and the last slot is local[0] (receiver) from the interpreter // // Similarly with locks. The first lock slot in the osr buffer is the nth lock // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock // in the interpreter frame (the method lock if a sync method)
// Initialize monitors in the compiled activation. // I0: pointer to osr buffer // // All other registers are dead at this point and the locals will be // copied into place by code emitted in the IR.
Register OSR_buf = osrBufferPointer()->as_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = BytesPerWord * method()->max_locals() +
(2 * BytesPerWord) * (number_of_locks - 1); // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in // the OSR buffer using 2 word entries: first the lock and then // the oop. for (int i = 0; i < number_of_locks; i++) { int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); // Verify the interpreter's monitor has a non-null object.
__ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__); // Copy the lock field into the compiled activation.
// Lock word (first word of the buffer entry) -> compiled frame's lock slot.
__ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
__ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
// Locked object oop (second word) -> compiled frame's object slot.
__ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
__ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
}
}
}
int LIR_Assembler::emit_exception_handler() { // Generate code for exception handler.
// NOTE(review): this definition is truncated in this chunk; only the
// stub-allocation prologue with its overflow bailout is visible here.
address handler_base = __ start_a_stub(exception_handler_size()); if (handler_base == NULL) { // Not enough space left for the handler.
bailout("exception handler overflow"); return -1;
}
// Emit the code to remove the frame from the stack in the exception // unwind path. int LIR_Assembler::emit_unwind_handler() { #ifndef PRODUCT if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
} #endif
int offset = code_offset(); Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved. Register Rtmp1 = Z_R11; Register Rtmp2 = Z_R12;
// Fetch the exception from TLS and clear out exception related thread state.
Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
Address exc_pc_addr = Address(Z_thread, JavaThread::exception_pc_offset());
__ z_lg(Z_EXC_OOP, exc_oop_addr);
__ clear_mem(exc_oop_addr, sizeof(oop));
__ clear_mem(exc_pc_addr, sizeof(intptr_t));
// Entry point for code elsewhere that branches into the unwind path.
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(Z_EXC_OOP); if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
}
if (compilation()->env()->dtrace_method_probes()) {
// Dtrace method-exit probes are not implemented on this platform; the
// disabled code below is kept as a template from another port.
ShouldNotReachHere(); // Not supported. #if 0
__ mov(rdi, r15_thread);
__ mov_metadata(rsi, method()->constant_encoding());
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); #endif
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception.
}
// Remove the activation and dispatch to the unwind handler.
__ pop_frame();
__ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP);
// Z_EXC_OOP: exception oop // Z_EXC_PC: exception pc
// Dispatch to the unwind logic.
__ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
__ z_br(Z_R5);
// NOTE(review): 'stub' is used below but its declaration (presumably a
// monitor-exit slow-path stub created earlier in this function) is not
// visible in this chunk -- verify against the full file.
// Emit the slow path assembly. if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
int LIR_Assembler::emit_deopt_handler() { // Generate code for deopt handler.
address handler_base = __ start_a_stub(deopt_handler_size()); if (handler_base == NULL) { // Not enough space left for the handler.
bailout("deopt handler overflow"); return -1;
} int offset = code_offset(); // Size must be constant (see HandlerImpl::emit_deopt_handler).
// Load the deopt blob's unpack entry and call it. load_const (not the
// optimized variant) is used so the emitted size stays constant.
__ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
__ call(Z_R1_scratch);
// The handler must fit into the space reserved for it.
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return offset;
}
// Materialize the oop constant 'o' into 'reg'. A NULL oop becomes a
// register clear that leaves the condition code untouched (a preceding
// cmove may still depend on it); any other oop is loaded from the
// constant table (TOC), bailing out if the constant section is full.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    // Must not kill cc set by cmove.
    __ clear_reg(reg, true/*64bit*/, false/*set cc*/);
    return;
  }
  AddressLiteral oop_addr = __ allocate_oop_address(o);
  if (!__ load_oop_from_toc(reg, oop_addr, reg)) {
    bailout("const section overflow");
  }
}
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { // Allocate a new index in table to hold the object once it's been patched. int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
// NOTE(review): this definition is truncated in this chunk -- the patching
// epilog that consumes 'patch' and the closing brace are not visible.
PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); // The NULL will be dynamically patched later so the sequence to // load the address literal must not be optimized.
__ load_const(reg, addrlit);
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) { // Allocate a new index in table to hold the klass once it's been patched. int index = __ oop_recorder()->allocate_metadata_index(NULL);
// NOTE(review): this definition is truncated in this chunk -- the patching
// epilog that consumes 'patch' and the closing brace are not visible.
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc"); // The NULL will be dynamically patched later so the sequence to // load the address literal must not be optimized.
__ load_const(reg, addrlit);
void LIR_Assembler::align_call(LIR_Code code) { // End of call instruction must be 4 byte aligned. int offset = __ offset(); switch (code) { case lir_icvirtual_call:
// IC calls first load the cached class from the TOC, so account for that
// extra instruction, then fall through to add the common call offset.
offset += MacroAssembler::load_const_from_toc_size(); // no break case lir_static_call: case lir_optvirtual_call: case lir_dynamic_call:
offset += NativeCall::call_far_pcrelative_displacement_offset; break; default: ShouldNotReachHere();
} if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) {
// Pad with a nop so the call's pc-relative displacement ends up aligned.
__ nop();
}
}
// NOTE(review): fragment -- this looks like the tail of ic_call(); the
// function header and the computation of 'virtual_call_oop_addr' and 'op'
// are not visible in this chunk.
// CALL to fixup routine. Fixup routine uses ScopeDesc info // to determine who we intended to call.
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
call(op, relocInfo::none);
}
// NOTE(review): fragments -- these 'case' labels belong to switch statements
// of constant-store helpers (const2stack / const2mem style code) whose
// enclosing functions are not visible in this chunk.
case T_LONG: // fall through case T_DOUBLE:
dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); break;
case T_LONG: // fall through case T_DOUBLE:
// 64-bit constant: materialize into a scratch register, then store.
__ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); break;
case T_BOOLEAN: // fall through case T_BYTE:
__ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
// Short (uimm12) displacement allows STC; otherwise the long-displacement
// STCY form is needed.
store_offset = __ offset(); if (Immediate::is_uimm12(addr.disp())) {
__ z_stc(Z_R0_scratch, addr);
} else {
__ z_stcy(Z_R0_scratch, addr);
} break;
case T_CHAR: // fall through case T_SHORT:
__ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
// Same displacement split for halfword stores: STH vs. STHY.
store_offset = __ offset(); if (Immediate::is_uimm12(addr.disp())) {
__ z_sth(Z_R0_scratch, addr);
} else {
__ z_sthy(Z_R0_scratch, addr);
} break;
// NOTE(review): fragments from several functions (a patched-load epilog, the
// method return sequence with its safepoint poll, and a metadata load for
// the interpreter method register); the enclosing function headers are not
// visible in this chunk.
// Remember the offset of the load. The patching_epilog must be done // before the call to add_debug_info, otherwise the PcDescs don't get // entered in increasing order. int offset = code_offset();
assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
// Pop the frame before the safepoint code.
__ pop_frame_restore_retPC(initial_frame_size_in_bytes());
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check(Z_R14);
}
// We need to mark the code position where the load from the safepoint // polling page was emitted as relocInfo::poll_return_type here.
__ relocate(relocInfo::poll_return_type);
__ load_from_polling_page(Z_R1_scratch);
// See also Matcher::interpreter_method_reg().
AddressLiteral meta = __ allocate_metadata_address(NULL); bool success = __ load_const_from_toc(Z_method, meta);
// Emit code for a two-operand arithmetic operation (add/sub/mul for integer
// operands, plus div for floating point). This is a two-address form:
// 'left' must equal 'dest'. 'info' must be NULL because idiv/irem and
// ldiv/lrem (which can raise exceptions) are handled elsewhere;
// 'pop_fpu_stack' is unused on this platform.
//
// Fix: the original text used the invalid token 'elseif' (six occurrences),
// which is not a C++ keyword; restored to 'else if'.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ z_ar (lreg, rreg); break;
        case lir_sub: __ z_sr (lreg, rreg); break;
        case lir_mul: __ z_msr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: __ z_agfi(lreg, c);  break;
        case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint
        case lir_mul: __ z_msfi(lreg, c);  break;
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ z_agr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ z_sgr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ z_msgr(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      switch (code) {
        case lir_add: __ z_agfi(lreg_lo, c); break;
        case lir_sub:
          if (c != min_jint) {
            __ z_agfi(lreg_lo, -c);
          } else {
            // -min_jint cannot be represented as simm32 in z_agfi
            // min_jint sign extended:      0xffffffff80000000
            // -min_jint as 64 bit integer: 0x0000000080000000
            // 0x80000000 can be represented as uimm32 in z_algfi
            // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000
            __ z_algfi(lreg_lo, UCONST64(0x80000000));
          }
          break;
        case lir_mul: __ z_msgfi(lreg_lo, c); break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_single_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->single_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        // The memory-operand forms below need a short displacement; load
        // the operand into the float scratch register instead.
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false);
      }
    }

    if (rreg != fnoreg) {
      // register - register
      switch (code) {
        case lir_add: __ z_aebr(lreg, rreg);  break;
        case lir_sub: __ z_sebr(lreg, rreg);  break;
        case lir_mul: __ z_meebr(lreg, rreg); break;
        case lir_div: __ z_debr(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      // register - stack slot
      switch (code) {
        case lir_add: __ z_aeb(lreg, raddr);  break;
        case lir_sub: __ z_seb(lreg, raddr);  break;
        case lir_mul: __ z_meeb(lreg, raddr); break;
        case lir_div: __ z_deb(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_double_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->double_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        // Same displacement restriction as the single-precision case.
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true);
      }
    }

    if (rreg != fnoreg) {
      // register - register
      switch (code) {
        case lir_add: __ z_adbr(lreg, rreg); break;
        case lir_sub: __ z_sdbr(lreg, rreg); break;
        case lir_mul: __ z_mdbr(lreg, rreg); break;
        case lir_div: __ z_ddbr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      // register - stack slot
      switch (code) {
        case lir_add: __ z_adb(lreg, raddr); break;
        case lir_sub: __ z_sdb(lreg, raddr); break;
        case lir_mul: __ z_mdb(lreg, raddr); break;
        case lir_div: __ z_ddb(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_address()) {
    // memory += constant, done in place
    assert(left == dest, "left and dest must be equal");
    assert(code == lir_add, "unsupported operation");
    assert(right->is_constant(), "unsupported operand");
    jint c = right->as_constant_ptr()->as_jint();
    LIR_Address* lir_addr = left->as_address_ptr();
    Address addr = as_Address(lir_addr);
    switch (lir_addr->type()) {
      case T_INT:
        __ add2mem_32(addr, c, Z_R1_scratch);
        break;
      case T_LONG:
        __ add2mem_64(addr, c, Z_R1_scratch);
        break;
      default:
        ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
// Emit code for a one-operand floating-point intrinsic. Only
// double-precision sqrt and abs are supported; neither needs a thread
// register, so 'thread' must be invalid.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  if (code == lir_sqrt) {
    assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
    FloatRegister src_reg = value->as_double_reg();
    FloatRegister dst_reg = dest->as_double_reg();
    __ z_sqdbr(dst_reg, src_reg);
  } else if (code == lir_abs) {
    assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
    FloatRegister src_reg = value->as_double_reg();
    FloatRegister dst_reg = dest->as_double_reg();
    __ z_lpdbr(dst_reg, src_reg);
  } else {
    ShouldNotReachHere();
  }
}
// Emit code for a bitwise logic operation (and/or/xor). The left operand
// doubles as the accumulator; the result is then moved into 'dst'.
//
// Fix: the original text used the invalid token 'elseif' (one occurrence);
// restored to 'else if'.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    // 32-bit case.
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ z_nilf(reg, val); break;
        case lir_logic_or:  __ z_oilf(reg, val); break;
        case lir_logic_xor: __ z_xilf(reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ z_ny(reg, raddr); break;
        case lir_logic_or:  __ z_oy(reg, raddr); break;
        case lir_logic_xor: __ z_xy(reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ z_nr(reg, rright); break;
        case lir_logic_or : __ z_or(reg, rright); break;
        case lir_logic_xor: __ z_xr(reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    // 64-bit case.
    Register l_lo = left->as_register_lo();
    if (right->is_constant()) {
      // 64-bit immediates don't fit the logic instructions; go through a
      // scratch register.
      __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, Z_R1_scratch);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register r_lo;
      // Oops live in plain registers, not lo/hi pairs.
      if (is_reference_type(right->type())) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, r_lo);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, r_lo);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, r_lo);
          break;
        default: ShouldNotReachHere();
      }
    }
    Register dst_lo = dst->as_register_lo();
    move_regs(l_lo, dst_lo);
  }
}
// NOTE(review): this definition appears garbled and truncated in this chunk:
// 'lreg' and 'dreg' are used below without visible declarations, the
// power-of-2 constant path breaks off mid-way, and the function runs past
// the end of the visible text. Comments only; verify against the full file.
// See operand selection in LIRGenerator::do_ArithmeticOp_Int(). void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { if (left->is_double_cpu()) { // 64 bit integer case
assert(left->is_double_cpu(), "left must be register");
assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()), "right must be register or power of 2 constant");
assert(result->is_double_cpu(), "result must be register");
if (right->is_constant()) { // Convert division by a power of two into some shifts and logical operations. Register treg1 = Z_R0_scratch; Register treg2 = Z_R1_scratch;
jlong divisor = right->as_jlong();
jlong log_divisor = log2i_exact(right->as_jlong());
// Divisor is not a power of 2 constant. Register rreg = right->as_register_lo(); Register treg = temp->as_register_lo();
assert(right->is_double_cpu(), "right must be register");
assert(lreg == Z_R11, "see ldivInOpr()");
assert(rreg != lreg, "right register must not be same as left register");
assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) ||
(code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()");
Register R1 = lreg->predecessor(); Register R2 = rreg;
assert(code != lir_idiv || lreg==dreg, "see code below"); if (code == lir_idiv) {
__ z_lcgr(lreg, lreg);
} else {
__ clear_reg(dreg, true, false);
}
NearLabel done;
// Divisor == -1 is handled without the hardware divide -- presumably to
// sidestep the min_jlong / -1 overflow case; the negation above (idiv) or
// the cleared dreg (irem) already holds the correct result. TODO confirm.
__ compare64_and_branch(R2, -1, Assembler::bcondEqual, done); if (code == lir_idiv) {
__ z_lcgr(lreg, lreg); // Revert lcgr above.
} if (ImplicitDiv0Checks) { // No debug info because the idiv won't trap. // Add_debug_info_for_div0 would instantiate another DivByZeroStub, // which is unnecessary, too.
add_debug_info_for_div0(__ offset(), info);
}
__ z_dsgr(R1, R2);
__ bind(done); return;
}
// 32 bit integer case
assert(left->is_single_cpu(), "left must be register");
assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant");
assert(result->is_single_cpu(), "result must be register");
if (right->is_constant()) { // Convert division by a power of two into some shifts and logical operations. Register treg1 = Z_R0_scratch; Register treg2 = Z_R1_scratch;
jlong divisor = right->as_jint();
jlong log_divisor = log2i_exact(right->as_jint());
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.50 Sekunden
(vorverarbeitet)
¤