/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Builds a LIR_Address for base + (index << shift) + disp, folding constant
// index/displacement components where the AArch64 addressing modes allow it,
// and materializing them into registers where they do not.
// NOTE(review): extraction had folded live `if` statements into '//' comments
// and fused `else if`/`return new` into single tokens; structure restored.
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += ((intx)index->as_jint()) << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        // shifted constant fits in 32 bits -- fold it into the displacement
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        // displacement encodes as an add/sub immediate
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        // displacement too large for an immediate -- load then add
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0 && index->is_register()) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, shift), "failed for large_disp: " INTPTR_FORMAT " and shift %d", large_disp, shift);
    return new LIR_Address(base, large_disp, type);
  }
}
// Computes the address of an array element:
//   array_opr + array_base_offset(type) + (index_opr << log2(element_size)).
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  const int header_offset  = arrayOopDesc::base_offset_in_bytes(type);
  const int log2_elem_size = exact_log2(type2aelembytes(type));
  return generate_address(array_opr, index_opr, log2_elem_size, header_offset, type);
}
// Returns an operand holding constant x of the given type (T_INT or T_LONG).
// If the constant cannot be encoded as a logical immediate it is materialized
// into a fresh register, since we cannot tell here whether it will feed a
// logical or an arithmetic instruction (their immediate encodings differ).
// NOTE(review): extraction had fused `else if` into `elseif`; restored.
LIR_Opr LIRGenerator::load_immediate(jlong x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(checked_cast<jint>(x));
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
CodeEmitInfo* info_for_exception = NULL; if (x->needs_null_check()) {
info_for_exception = state_for(x);
} // this CodeEmitInfo must not have the xhandlers because here the // object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
x->monitor_no(), info_for_exception, info);
}
// NOTE(review): fragment -- upstream this is the head of the frem/drem path
// of LIRGenerator::do_ArithmeticOp_FPU; the code that consumes `cc` (the
// runtime call and the move of the result) is missing after the last line.
if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { // float remainder is implemented as a direct call into the runtime
// NOTE(review): `right` is bound to x->x() and `left` to x->y() -- this
// apparent swap matches the argument order used for the runtime call;
// confirm against the upstream file before "fixing" it.
LIRItem right(x->x(), this);
LIRItem left(x->y(), this);
// Build a (FLOAT,FLOAT) or (DOUBLE,DOUBLE) signature for the runtime stub.
BasicTypeList signature(2); if (x->op() == Bytecodes::_frem) {
signature.append(T_FLOAT);
signature.append(T_FLOAT);
} else {
signature.append(T_DOUBLE);
signature.append(T_DOUBLE);
}
// C calling convention used to marshal the two FP arguments.
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
// NOTE(review): fragment -- the signature of the enclosing function
// (upstream this is LIRGenerator::do_ArithmeticOp_Long) is missing above.
// Extraction had folded the zero-check and power-of-two tests into '//'
// comments on a single line, commenting out live code; structure restored.

// missing test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);

if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

  left.load_item();

  bool need_zero_check = true;
  if (right.is_constant()) {
    jlong c = right.get_jlong_constant();
    // no need to do div-by-zero check if the divisor is a non-zero constant
    if (c != 0) need_zero_check = false;
    // do not load right if the divisor is a power-of-2 constant
    if (c > 0 && is_power_of_2(c)) {
      right.dont_load_item();
    } else {
      right.load_item();
    }
  } else {
    right.load_item();
  }
  if (need_zero_check) {
    // explicit divide-by-zero check: branch to the throwing stub
    CodeEmitInfo* info = state_for(x);
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));
  }

  rlock_result(x);
  switch (x->op()) {
  case Bytecodes::_lrem:
    __ rem (left.result(), right.result(), x->operand());
    break;
  case Bytecodes::_ldiv:
    __ div (left.result(), right.result(), x->operand());
    break;
  default:
    ShouldNotReachHere();
    break;
  }
} else {
  assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect lmul, ladd or lsub");
  // add, sub, mul
  // lmul always needs the rhs in a register; add/sub can encode suitable
  // constants as immediates, so those are left unloaded to save a register.
  left.load_item();
  if (! right.is_register()) {
    if (x->op() == Bytecodes::_lmul
        || ! right.is_constant()
        || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
      right.load_item();
    } else { // add, sub
      assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
      // don't load constants to save register
      right.load_nonconstant();
    }
  }
  rlock_result(x);
  arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
}
}
// NOTE(review): fragment -- upstream this is the head of
// LIRGenerator::do_ArithmeticOp_Int; its signature above and the code that
// follows the zero check (the idiv/irem emission and the non-div arithmetic
// path, including the close of the `if` below) are missing from this view.
// Extraction had folded live `if` statements into '//' comments; restored.

// Test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
LIRItem* left_arg = &left;
LIRItem* right_arg = &right;
if (x->is_commutative() && left.is_stack() && right.is_register()) {
  // swap them if left is real stack (or cached) and right is real register(not cached)
  left_arg = &right;
  right_arg = &left;
}

left_arg->load_item();

// do not need to load right, as we can handle stack and constants
if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

  rlock_result(x);

  bool need_zero_check = true;
  if (right.is_constant()) {
    jint c = right.get_jint_constant();
    // no need to do div-by-zero check if the divisor is a non-zero constant
    if (c != 0) need_zero_check = false;
    // do not load right if the divisor is a power-of-2 constant
    if (c > 0 && is_power_of_2(c)) {
      right_arg->dont_load_item();
    } else {
      right_arg->load_item();
    }
  } else {
    right_arg->load_item();
  }
  if (need_zero_check) {
    // explicit divide-by-zero check: branch to the throwing stub
    CodeEmitInfo* info = state_for(x);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, new DivByZeroStub(info));
  }
// Dispatches an arithmetic operation to the tag-specific generator
// (FPU, long, or int). NOTE(review): extraction had folded the live
// commutativity `if` into a '//' comment; line structure restored.
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}
// operands for arraycopy must use fixed registers, otherwise // LinearScan will fail allocation (because arraycopy always needs a // call)
// The java calling convention will give us enough registers // so that on the stub side the args will be perfect already. // On the other slow/special case side we call C and the arg // positions are not similar enough to pick one as the best. // Also because the java calling convention is a "shifted" version // of the C convention we can process the java args trivially into C // args without worry of overwriting during the xfer
// Generates LIR for the java.util.zip.CRC32 intrinsics.
// NOTE(review): truncated fragment -- the body of the updateBytes /
// updateByteBuffer case and the end of this function are missing below.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "why are we here?"); // Make all state_for calls early since they can emit code
LIR_Opr result = rlock_result(x); switch (x->id()) { case vmIntrinsics::_updateCRC32: {
LIRItem crc(x->argument_at(0), this);
LIRItem val(x->argument_at(1), this); // val is destroyed by update_crc32
val.set_destroys_register();
crc.load_item();
val.load_item();
// Single-byte update: result = update_crc32(crc, val).
__ update_crc32(crc.result(), val.result(), result); break;
} case vmIntrinsics::_updateBytesCRC32: case vmIntrinsics::_updateByteBufferCRC32: { bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);
// Generates LIR for the java.util.zip.CRC32C intrinsics.
// NOTE(review): garbled fragment -- most of the updateBytes path is missing,
// and the last three lines (`reg`-to-`result` move and closing brace) look
// like the tail of a *different* function spliced in by the extraction:
// `reg` is never declared here and `result` is re-declared. Verify against
// the upstream file before relying on this block.
void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
assert(UseCRC32CIntrinsics, "why are we here?"); // Make all state_for calls early since they can emit code
LIR_Opr result = rlock_result(x); switch (x->id()) { case vmIntrinsics::_updateBytesCRC32C: case vmIntrinsics::_updateDirectByteBufferCRC32C: { bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C); int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// Generates LIR for anewarray (object array allocation).
// NOTE(review): several declarations were dropped by extraction -- `info`,
// `reg`, `klass_reg` and `tmp1`..`tmp4` are used below but never declared in
// this view; upstream they are fixed-register/temp definitions. Do not treat
// this block as compilable without restoring those lines from upstream.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIRItem length(x->length(), this); // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction // and therefore provide the state before the parameters have been consumed
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
}
// Length is forced into r19 -- presumably what the slow-path stub expects.
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
// Load (and possibly patch) the array klass, then emit the allocation,
// falling back to the slow-path stub when inline allocation fails.
klass2reg_with_patching(klass_reg, obj, patching_info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// Generates LIR for multianewarray.
// NOTE(review): truncated fragment -- the code after loading each dimension
// (storing the sizes and invoking the runtime allocation) is missing below.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims(); int i = dims->length();
// Wrap every dimension expression in a LIRItem, highest index first.
LIRItemList* items = new LIRItemList(i, i, NULL); while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
// Cannot re-use same xhandlers for multiple CodeEmitInfos, so // clone all handlers (NOTE: Usually this is handled transparently // by the CodeEmitInfo cloning logic in CodeStub constructors but // is done explicitly here because a stub isn't being used).
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
}
CodeEmitInfo* info = state_for(x, x->state());
// Load each dimension value (view ends mid-loop).
i = dims->length(); while (i-- > 0) {
LIRItem* size = items->at(i);
size->load_item();
// NOTE(review): fragment -- upstream this is the middle of
// LIRGenerator::do_CheckCast; the head (declaring `x` and the LIRItem `obj`)
// and the tail (the checkcast emission) are missing from this view.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { // must do this before locking the destination register as an oop register, // and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
}
obj.load_item();
// info for exceptions
CodeEmitInfo* info_for_exception =
(x->needs_exception_state() ? state_for(x) :
state_for(x, x->state_before(), true/*ignore_xhandler*/));
// NOTE(review): fragment -- upstream this is the tail of
// LIRGenerator::do_InstanceOf; the head (declaring `x` and the LIRItem
// `obj`) is missing from this view.
// result and test object may not be in same register
LIR_Opr reg = rlock_result(x);
CodeEmitInfo* patching_info = NULL; if ((!x->klass()->is_loaded() || PatchALot)) { // must do this before locking the destination register as an oop register
patching_info = state_for(x, x->state_before());
}
obj.load_item();
// tmp3 is only allocated when the klass is unloaded or klass pointers are
// compressed; otherwise the illegal operand signals "no temp needed".
LIR_Opr tmp3 = LIR_OprFact::illegalOpr; if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
// add safepoint before generating condition code so it can be recomputed if (x->is_safepoint()) { // increment backedge counter if needed
increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
__ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
}
// Emits a volatile field load: an optional leading membar followed by the
// volatile load itself. NOTE(review): extraction had folded the live
// `if (!CompilerConfig::is_c1_only_no_jvmci())` into the comment,
// unconditionally skipping the membar; line structure restored.
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (!CompilerConfig::is_c1_only_no_jvmci()) {
    __ membar();
  }
  __ volatile_load_mem_reg(address, result, info);
}
¤ Dauer der Verarbeitung: 0.19 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.