/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Returns true if the constant value 'v' can be encoded directly into an
// instruction instead of being materialized in a register.
// Int/long constants must fit the add-immediate encoding in both signs,
// object constants must be null, and float/double constants must be zero.
bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    int value = v->type()->as_IntConstant()->value();
    // "-value" must be defined for value may be used for sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_LongConstant() != NULL) {
    long value = v->type()->as_LongConstant()->value();
    // "-value" must be defined for value may be used for sub
    return Assembler::operand_valid_for_add_immediate(value) &&
           Assembler::operand_valid_for_add_immediate(- value);
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
}
// Returns true if the LIR constant 'c' (int or long only) can be encoded
// as an instruction immediate; both the value and its negation must fit
// the add-immediate encoding because the constant may be used for sub.
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->as_constant() != NULL) {
    long constant = 0;
    switch (c->type()) {
    case T_INT:  constant = c->as_jint();  break;
    case T_LONG: constant = c->as_jlong(); break;
    default:     return false;
    }
    // "-constant" must be defined for c may be used for sub
    return Assembler::operand_valid_for_add_immediate(constant) &&
           Assembler::operand_valid_for_add_immediate(- constant);
  }
  return false;
}
// NOTE(review): fragment — the enclosing function's header (presumably the
// MonitorEnter lowering) and the declarations of 'x' and 'obj' are not
// visible in this chunk; code left byte-identical.
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
// A separate CodeEmitInfo is needed for the implicit null check of the
// locked object (state at the instruction, before the lock is taken).
CodeEmitInfo* info_for_exception = NULL; if (x->needs_null_check()) {
info_for_exception = state_for(x);
} // this CodeEmitInfo must not have the xhandlers because here the // object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), LIR_OprFact::illegalOpr,
x->monitor_no(), info_for_exception, info);
}
if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
// float remainder is implemented as a direct call into the runtime
BasicTypeList signature(2); if (x->op() == Bytecodes::_frem) {
signature.append(T_FLOAT);
signature.append(T_FLOAT);
} else {
signature.append(T_DOUBLE);
signature.append(T_DOUBLE);
}
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
// missing test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
left.load_item();
bool need_zero_check = true; if (right.is_constant()) {
jlong c = right.get_jlong_constant(); // no need to do div-by-zero check if the divisor is a non-zero constant if (c != 0) { need_zero_check = false; } // do not load right if the divisor is a power-of-2 constant if (c > 0 && is_power_of_2(c)) {
right.dont_load_item();
} else {
right.load_item();
}
} else {
right.load_item();
} if (need_zero_check) {
CodeEmitInfo* info = state_for(x);
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
}
// Test if instr is commutative and if we should swap
LIRItem left(x->x(), this);
LIRItem right(x->y(), this);
LIRItem* left_arg = &left;
LIRItem* right_arg = &right; if (x->is_commutative() && left.is_stack() && right.is_register()) { // swap them if left is real stack (or cached) and right is real register(not cached)
left_arg = &right;
right_arg = &left;
}
left_arg->load_item(); // do not need to load right, as we can handle stack and constants if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
rlock_result(x);
bool need_zero_check = true; if (right.is_constant()) {
jint c = right.get_jint_constant(); // no need to do div-by-zero check if the divisor is a non-zero constant if (c != 0) { need_zero_check = false; } // do not load right if the divisor is a power-of-2 constant if (c > 0 && is_power_of_2(c)) {
right_arg->dont_load_item();
} else {
right_arg->load_item();
}
} else {
right_arg->load_item();
} if (need_zero_check) {
CodeEmitInfo* info = state_for(x);
__ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
__ branch(lir_cond_equal, new DivByZeroStub(info));
}
// Dispatches an ArithmeticOp to the type-specific lowering routine.
// The mangled original had the commutativity guard swallowed into the
// preceding '//' comment, which made swap_operands() unconditional; the
// guard is restored here.
void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
  case floatTag:
  case doubleTag: do_ArithmeticOp_FPU(x);  return;
  case longTag:   do_ArithmeticOp_Long(x); return;
  case intTag:    do_ArithmeticOp_Int(x);  return;
  default:        ShouldNotReachHere();    return;
  }
}
switch (x->op()) { case Bytecodes::_iand: // fall through case Bytecodes::_land:
__ logical_and(left.result(), right.result(), x->operand()); break; case Bytecodes::_ior: // fall through case Bytecodes::_lor:
__ logical_or(left.result(), right.result(), x->operand()); break; case Bytecodes::_ixor: // fall through case Bytecodes::_lxor:
__ logical_xor(left.result(), right.result(), x->operand()); break; default: Unimplemented();
}
}
switch (x->id()) { case vmIntrinsics::_dexp: // fall through case vmIntrinsics::_dlog: // fall through case vmIntrinsics::_dpow: // fall through case vmIntrinsics::_dcos: // fall through case vmIntrinsics::_dsin: // fall through case vmIntrinsics::_dtan: // fall through case vmIntrinsics::_dlog10:
do_LibmIntrinsic(x); break; case vmIntrinsics::_dabs: // fall through case vmIntrinsics::_dsqrt: // fall through case vmIntrinsics::_dsqrt_strict: {
assert(x->number_of_arguments() == 1, "wrong type");
LIRItem value(x->argument_at(0), this);
value.load_item();
LIR_Opr dst = rlock_result(x);
switch (x->id()) { case vmIntrinsics::_dsqrt: // fall through case vmIntrinsics::_dsqrt_strict: {
__ sqrt(value.result(), dst, LIR_OprFact::illegalOpr); break;
} case vmIntrinsics::_dabs: {
__ abs(value.result(), dst, LIR_OprFact::illegalOpr); break;
} default:
ShouldNotReachHere();
} break;
} default:
ShouldNotReachHere();
}
}
// NOTE(review): fragment — the two joined comment blocks below appear to
// belong to an arraycopy lowering in the full source, while the trailing
// statements look like the tail of an allocation lowering; the enclosing
// function headers are not visible in this chunk. Code left byte-identical.
// operands for arraycopy must use fixed registers, otherwise // LinearScan will fail allocation (because arraycopy always needs a // call)
// The java calling convention will give us enough registers // so that on the stub side the args will be perfect already. // On the other slow/special case side we call C and the arg // positions are not similar enough to pick one as the best. // Also because the java calling convention is a "shifted" version // of the C convention we can process the java args trivially into C // args without worry of overwriting during the xfer
// Move the value from its fixed register 'reg' into the instruction's
// result operand.
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// Generates LIR for a NewObjectArray bytecode: resolves (and, if needed,
// patches) the element klass, allocates the array with a slow-path stub,
// and moves the allocated object into the instruction's result operand.
// NOTE(review): the declarations of 'reg', 'klass_reg', 'info' and the
// tmp1..tmp4 temporaries are not visible in this chunk — they appear to
// have been lost during extraction; confirm against the complete file.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
LIRItem length(x->length(), this); // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction // and therefore provide the state before the parameters have been consumed
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
}
// Force the length into a fixed register (r9) — presumably the register
// the allocation slow path expects; confirm against the stub definition.
length.load_item_force(FrameMap::r9_opr);
LIR_Opr len = length.result();
CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
// ciObjArrayKlass::make can fail under memory pressure; bail out of the
// compilation rather than emit code for an unloaded array klass.
ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass()); if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
klass2reg_with_patching(klass_reg, obj, patching_info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
// Generates LIR for NewMultiArray: wraps and loads the dimension sizes and
// sets up patching state when the array klass is not yet loaded.
// NOTE(review): this chunk is truncated — the remainder of the function
// (presumably the runtime call performing the allocation) is not visible.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
Values* dims = x->dims(); int i = dims->length();
// Wrap each dimension Value in a LIRItem (iterating indices in reverse).
LIRItemList* items = new LIRItemList(i, i, NULL); while (i-- > 0) {
LIRItem* size = new LIRItem(dims->at(i), this);
items->at_put(i, size);
}
// Evaluate state_for early since it may emit code.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() || PatchALot) {
patching_info = state_for(x, x->state_before());
// Cannot re-use same xhandlers for multiple CodeEmitInfos, so // clone all handlers (NOTE: Usually this is handled transparently // by the CodeEmitInfo cloning logic in CodeStub constructors but // is done explicitly here because a stub isn't being used).
x->set_exception_handlers(new XHandlers(x->exception_handlers()));
}
CodeEmitInfo* info = state_for(x, x->state());
// Load every dimension size so the values are available in registers.
i = dims->length(); while (i-- > 0) {
LIRItem* size = items->at(i);
size->load_item();
// NOTE(review): fragment — looks like the start of a CheckCast lowering
// (patching setup and exception-state selection); the enclosing function
// header and the declaration of 'obj' are not visible in this chunk.
CodeEmitInfo* patching_info = NULL; if (!x->klass()->is_loaded() ||
(PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) { // must do this before locking the destination register as an oop register, // and before the obj is loaded (the latter is for deoptimization)
patching_info = state_for(x, x->state_before());
}
obj.load_item();
// info for exceptions
CodeEmitInfo* info_for_exception =
(x->needs_exception_state() ? state_for(x) :
state_for(x, x->state_before(), true/*ignore_xhandler*/ ));
// NOTE(review): fragment — appears to be the tail of an InstanceOf
// lowering; the enclosing function header and the declaration of 'obj'
// are not visible in this chunk. Code left byte-identical.
// result and test object may not be in same register
LIR_Opr reg = rlock_result(x);
CodeEmitInfo* patching_info = NULL; if ((!x->klass()->is_loaded() || PatchALot)) { // must do this before locking the destination register as an oop register
patching_info = state_for(x, x->state_before());
}
obj.load_item();
// An extra temp register is only needed when the klass is not yet loaded
// (runtime patching) or when compressed class pointers must be decoded.
LIR_Opr tmp3 = LIR_OprFact::illegalOpr; if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ instanceof(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
// Lowers an If instruction: loads the operands (mirroring the condition
// for unsupported long comparisons), emits a safepoint on backedges, and
// (in the truncated remainder) emits the compare-and-branch.
// NOTE(review): this chunk is truncated before the function's end, and the
// declarations of 'cond', 'xitem'/'yitem' and 'xin'/'yin' are missing —
// confirm against the full source. The mangled original additionally had
// the 'if (cond == If::gtr || cond == If::leq)' and 'if (x->is_safepoint())'
// statements swallowed into '//' comments; they are restored here.
void LIRGenerator::do_If(If* x) {
  // If should have two successors
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  yin->load_item();
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
                                             x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.