/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Traverse assignment graph in depth first order and generate moves in post order // ie. two assignments: b := c, a := b start with node c: // Call graph: move(NULL, c) -> move(c, b) -> move(b, a) // Generates moves in this order: move b to a and move c to b // ie. cycle a := b, b := a start with node a // Call graph: move(NULL, a) -> move(a, b) -> move(b, a) // Generates moves in this order: move b to temp, move a to b, move temp to a void PhiResolver::move(ResolveNode* src, ResolveNode* dest) { if (!dest->visited()) {
dest->set_visited(); for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
move(dest, dest->destination_at(i));
}
} elseif (!dest->start_node()) { // cylce in graph detected
assert(_loop == NULL, "only one loop valid!");
_loop = dest;
move_to_temp(src->operand()); return;
} // else dest is a start node
PhiResolver::~PhiResolver() {
  // Pass 1: start a depth-first walk from every virtual-register node not
  // reached yet.  This emits the moves between virtual registers and, if a
  // cycle is found, breaks it through the temp register (see move()).
  for (int idx = virtual_operands().length() - 1; idx >= 0; idx--) {
    ResolveNode* cur = virtual_operands().at(idx);
    if (cur->visited()) continue;
    _loop = NULL;
    move(NULL, cur);
    cur->set_start_node();
    assert(_temp->is_illegal(), "move_temp_to() call missing");
  }

  // Pass 2: emit moves whose source is not a virtual register; these cannot
  // participate in a cycle, so each destination is written directly.
  for (int idx = other_operands().length() - 1; idx >= 0; idx--) {
    ResolveNode* cur = other_operands().at(idx);
    for (int d = cur->no_of_destinations() - 1; d >= 0; d--) {
      emit_move(cur->operand(), cur->destination_at(d)->operand());
    }
  }
}
// Returns the ResolveNode for opr, creating it on first use.  Virtual
// registers are canonicalized through vreg_table() so that each vreg number
// maps to exactly one node; all other operands get a fresh node every time.
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  if (!opr->is_virtual()) {
    // Non-virtual operands only ever appear as move sources and are not shared.
    assert(source, "");
    ResolveNode* fresh = new ResolveNode(opr);
    other_operands().append(fresh);
    return fresh;
  }

  int vreg_num = opr->vreg_number();
  ResolveNode* node = vreg_table().at_grow(vreg_num, NULL);
  assert(node == NULL || node->operand() == opr, "");
  if (node == NULL) {
    node = new ResolveNode(opr);
    vreg_table().at_put(vreg_num, node);
  }
  // Make sure that all virtual operands show up in the list when
  // they are used as the source of a move.
  if (source && !virtual_operands().contains(node)) {
    virtual_operands().append(node);
  }
  return node;
}
// set up the list of LIR instructions
assert(block->lir() == NULL, "LIR list already computed for this block");
_lir = new LIR_List(compilation(), block);
block->set_lir(_lir);
__ branch_destination(block->label());
if (LIRTraceExecution &&
Compilation::current()->hir()->start()->block_id() != block->block_id() &&
!block->is_set(BlockBegin::exception_entry_flag)) {
assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
trace_block_entry(block);
}
}
// LIR_Opr for unpinned constants shouldn't be referenced by other // blocks so clear them out after processing the block. for (int i = 0; i < _unpinned_constants.length(); i++) {
_unpinned_constants.at(i)->clear_operand();
}
_unpinned_constants.trunc_to(0);
// clear our any registers for other local constants
_constants.trunc_to(0);
_reg_for_constants.trunc_to(0);
}
// This is called for each node in tree; the walk stops if a root is reached void LIRGenerator::walk(Value instr) {
InstructionMark im(compilation(), instr); //stop walk when encounter a root if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
} else {
assert(instr->subst() == instr, "shouldn't have missed substitution");
instr->visit(this); // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
}
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
assert(state != NULL, "state must be defined");
#ifndef PRODUCT
state->verify(); #endif
ValueStack* s = state;
for_each_state(s) { if (s->kind() == ValueStack::EmptyExceptionState) {
assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty"); continue;
}
int index;
Value value;
for_each_stack_value(s, index, value) {
assert(value->subst() == value, "missed substitution"); if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
walk(value);
assert(value->operand()->is_valid(), "must be evaluated now");
}
}
MethodLivenessResult liveness = method->liveness_at_bci(bci); if (bci == SynchronizationEntryBCI) { if (x->as_ExceptionObject() || x->as_Throw()) { // all locals are dead on exit from the synthetic unlocker
liveness.clear();
} else {
assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
}
} if (!liveness.is_valid()) { // Degenerate or breakpointed method.
bailout("Degenerate or breakpointed method");
} else {
assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
for_each_local_value(s, index, value) {
assert(value->subst() == value, "missed substitution"); if (liveness.at(index) && !value->type()->is_illegal()) { if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
walk(value);
assert(value->operand()->is_valid(), "must be evaluated now");
}
} else { // NULL out this local so that linear scan can assume that all non-NULL values are live.
s->invalidate_local(index);
}
}
}
}
/* Loads the klass metadata into register r.  C2 relies on constant pool
 * entries being resolved (ciTypeFlow), so if tiered compilation is active
 * and the class hasn't yet been resolved we need to emit a patch that
 * resolves the class.
 */
void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  bool must_patch = (!CompilerConfig::is_c1_only_no_jvmci() && need_resolve)
                    || !obj->is_loaded()
                    || PatchALot;
  if (must_patch) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}
switch(code) { case Bytecodes::_dadd: case Bytecodes::_fadd: case Bytecodes::_ladd: case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break; case Bytecodes::_fmul: case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;
case Bytecodes::_dmul: __ mul(left_op, right_op, result_op, tmp_op); break;
case Bytecodes::_imul:
{ bool did_strength_reduce = false;
if (right->is_constant()) {
jint c = right->as_jint(); if (c > 0 && is_power_of_2(c)) { // do not need tmp here
__ shift_left(left_op, exact_log2(c), result_op);
did_strength_reduce = true;
} else {
did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
}
} // we couldn't strength reduce so just emit the multiply if (!did_strength_reduce) {
__ mul(left_op, right_op, result_op);
}
} break;
case Bytecodes::_dsub: case Bytecodes::_fsub: case Bytecodes::_lsub: case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break; // ldiv and lrem are implemented with a direct runtime call
case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;
case Bytecodes::_drem: case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
if (TwoOperandLIRForm && value != result_op // Only 32bit right shifts require two operand form on S390.
S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
assert(count != result_op, "malformed");
__ move(value, result_op);
value = result_op;
}
assert(count->is_constant() || count->is_register(), "must be"); switch(code) { case Bytecodes::_ishl: case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break; case Bytecodes::_ishr: case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break; case Bytecodes::_iushr: case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break; default: ShouldNotReachHere();
}
}
switch(code) { case Bytecodes::_iand: case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
case Bytecodes::_ior: case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
case Bytecodes::_ixor: case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
default: ShouldNotReachHere();
}
}
void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* stub = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, stub, info_for_exception);
}
#ifndef PRODUCT void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) { if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
} elseif (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
} #endif
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
klass2reg_with_patching(klass_reg, klass, info, is_unresolved); // If klass is not loaded we do not know if the klass has finalizers: if (UseFastNewInstance && klass->is_loaded()
&& !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
// if a probable array type has been identified, figure out if any // of the required checks for a fast case can be elided. int flags = LIR_OpArrayCopy::all_flags;
if (!src_objarray)
flags &= ~LIR_OpArrayCopy::src_objarray; if (!dst_objarray)
flags &= ~LIR_OpArrayCopy::dst_objarray;
if (!x->arg_needs_null_check(0))
flags &= ~LIR_OpArrayCopy::src_null_check; if (!x->arg_needs_null_check(2))
flags &= ~LIR_OpArrayCopy::dst_null_check;
if (expected_type != NULL) {
Value length_limit = NULL;
IfOp* ifop = length->as_IfOp(); if (ifop != NULL) { // look for expressions like min(v, a.length) which ends up as // x > y ? y : x or x >= y ? y : x if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
ifop->x() == ifop->fval() &&
ifop->y() == ifop->tval()) {
length_limit = ifop->y();
}
}
// try to skip null checks and range checks
NewArray* src_array = src->as_NewArray(); if (src_array != NULL) {
flags &= ~LIR_OpArrayCopy::src_null_check; if (length_limit != NULL &&
src_array->length() == length_limit &&
is_constant_zero(src_pos)) {
flags &= ~LIR_OpArrayCopy::src_range_check;
}
}
// check from incoming constant values if (positive_constant(src_pos))
flags &= ~LIR_OpArrayCopy::src_pos_positive_check; if (positive_constant(dst_pos))
flags &= ~LIR_OpArrayCopy::dst_pos_positive_check; if (positive_constant(length))
flags &= ~LIR_OpArrayCopy::length_positive_check;
// see if the range check can be elided, which might also imply // that src or dst is non-null.
ArrayLength* al = length->as_ArrayLength(); if (al != NULL) { if (al->array() == src) { // it's the length of the source array
flags &= ~LIR_OpArrayCopy::length_positive_check;
flags &= ~LIR_OpArrayCopy::src_null_check; if (is_constant_zero(src_pos))
flags &= ~LIR_OpArrayCopy::src_range_check;
} if (al->array() == dst) { // it's the length of the destination array
flags &= ~LIR_OpArrayCopy::length_positive_check;
flags &= ~LIR_OpArrayCopy::dst_null_check; if (is_constant_zero(dst_pos))
flags &= ~LIR_OpArrayCopy::dst_range_check;
}
} if (is_exact) {
flags &= ~LIR_OpArrayCopy::type_check;
}
}
IntConstant* src_int = src_pos->type()->as_IntConstant();
IntConstant* dst_int = dst_pos->type()->as_IntConstant(); if (src_int && dst_int) { int s_offs = src_int->value(); int d_offs = dst_int->value(); if (src_int->value() >= dst_int->value()) {
flags &= ~LIR_OpArrayCopy::overlapping;
} if (expected_type != NULL) {
BasicType t = expected_type->element_type()->basic_type(); int element_size = type2aelembytes(t); if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
flags &= ~LIR_OpArrayCopy::unaligned;
}
}
} elseif (src_pos == dst_pos || is_constant_zero(dst_pos)) { // src and dest positions are the same, or dst is zero so assume // nonoverlapping copy.
flags &= ~LIR_OpArrayCopy::overlapping;
}
if (src == dst) { // moving within a single array so no type checks are needed if (flags & LIR_OpArrayCopy::type_check) {
flags &= ~LIR_OpArrayCopy::type_check;
}
}
*flagsp = flags;
*expected_typep = (ciArrayKlass*)expected_type;
}
// Rounds a floating-point item to the proper precision when the platform
// requires explicit rounding (x87 FPU without SSE); otherwise returns opr
// unchanged.
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (!strict_fp_requires_explicit_rounding) {
    return opr;
  }
#ifdef IA32
  if (UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
#else
  Unimplemented();
#endif // IA32
  return opr;
}
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
assert(type2size[t] == type2size[value->type()], "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())); if (!value->is_register()) { // force into a register
LIR_Opr r = new_register(value->type());
__ move(value, r);
value = r;
}
// MDO cells are intptr_t, so the data_reg width is arch-dependent.
LIR_Opr data_reg = new_pointer_register();
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
__ move(data_addr, data_reg); // Use leal instead of add to avoid destroying condition codes on x86
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
__ leal(LIR_OprFact::address(fake_incr_value), data_reg);
__ move(data_reg, data_addr);
}
}
// Phi technique: // This is about passing live values from one basic block to the other. // In code generated with Java it is rather rare that more than one // value is on the stack from one basic block to the other. // We optimize our technique for efficient passing of one value // (of type long, int, double..) but it can be extended. // When entering or leaving a basic block, all registers and all spill // slots are release and empty. We use the released registers // and spill slots to pass the live values from one block // to the other. The topmost value, i.e., the value on TOS of expression // stack is passed in registers. All other values are stored in spilling // area. Every Phi has an index which designates its spill slot // At exit of a basic block, we fill the register(s) and spill slots. // At entry of a basic block, the block_prolog sets up the content of phi nodes // and locks necessary registers and spilling slots.
// move current value to referenced phi function void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
Phi* phi = sux_val->as_Phi(); // cur_val can be null without phi being null in conjunction with inlining if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) { if (phi->is_local()) { for (int i = 0; i < phi->operand_count(); i++) {
Value op = phi->operand_at(i); if (op != NULL && op->type()->is_illegal()) {
bailout("illegal phi operand");
}
}
}
Phi* cur_phi = cur_val->as_Phi(); if (cur_phi != NULL && cur_phi->is_illegal()) { // Phi and local would need to get invalidated // (which is unexpected for Linear Scan). // But this case is very rare so we simply bail out.
bailout("propagation of illegal phi"); return;
}
LIR_Opr operand = cur_val->operand(); if (operand->is_illegal()) {
assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL, "these can be produced lazily");
operand = operand_for_instruction(cur_val);
}
resolver->move(operand, operand_for_instruction(phi));
}
}
// Moves all stack values into their PHI position void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  // Only a single successor can receive phi moves from this block.
  BlockBegin* bb = block(); if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");
    // a block with only one predecessor never has phi functions if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);
      ValueStack* sux_state = sux->state();
      Value sux_value; int index;
      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
      // NOTE(review): `resolver`, `sux_value` and `index` are declared but
      // never used here — the loop that walks the successor's stack/local
      // values and feeds them into the resolver appears to be missing from
      // this copy of the file.  TODO: confirm against the upstream source.
    }
  }
}
// Allocates the next free virtual register of the given type.
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int next_vreg = _virtual_register_number;
  // Leave some headroom before the hard limit: the bailout is only checked
  // periodically, so a few extra registers can still be handed out before we
  // really run out, which avoids tripping over assertions in the meantime.
  if (next_vreg + 20 >= LIR_Opr::vreg_max) {
    bailout("out of virtual registers in LIR generator");
    if (next_vreg + 2 >= LIR_Opr::vreg_max) {
      // Wrap around and keep going until the bailout really takes effect,
      // to avoid hitting assertions.
      _virtual_register_number = LIR_Opr::vreg_base;
      next_vreg = LIR_Opr::vreg_base;
    }
  }
  _virtual_register_number += 1;
  LIR_Opr result = LIR_OprFact::virtual_register(next_vreg, type);
  assert(result != LIR_OprFact::illegal(), "ran out of virtual registers");
  return result;
}
// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) { return new_register(instr->type());
}
// Allocates a register via rlock and records it as x's result operand.
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr opr = rlock(x);
  set_result(x, opr);
  return opr;
}
// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
LIR_Opr reg; switch (type) { case T_BYTE: case T_BOOLEAN:
reg = rlock_byte(type); break; default:
reg = rlock(x); break;
}
void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
assert(block()->next() == x, "ExceptionObject must be first instruction of block");
// no moves are created for phi functions at the begin of exception // handlers, so assign operands manually here
for_each_phi_fun(block(), phi, if (!phi->is_illegal()) { operand_for_instruction(phi); });
void LIRGenerator::do_Phi(Phi* x) {
  // Phi functions are never visited directly; their operands are set up by
  // the phi-move machinery (see move_to_phi / PhiResolver in this file).
  ShouldNotReachHere();
}
// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined. void LIRGenerator::do_Constant(Constant* x) { if (x->state_before() != NULL) { // Any constant with a ValueStack requires patching so emit the patch here
LIR_Opr reg = rlock_result(x);
CodeEmitInfo* info = state_for(x, x->state_before());
__ oop2reg_patch(NULL, reg, info);
} elseif (x->use_count() > 1 && !can_inline_as_constant(x)) { if (!x->is_pinned()) { // unpinned constants are handled specially so that they can be // put into registers when they are used multiple times within a // block. After the block completes their operand will be // cleared so that other blocks can't refer to that register.
set_result(x, load_constant(x));
} else {
LIR_Opr res = x->operand(); if (!res->is_valid()) {
res = LIR_OprFact::value_type(x->type());
} if (res->is_constant()) {
LIR_Opr reg = rlock_result(x);
__ move(res, reg);
} else {
set_result(x, res);
}
}
} else {
set_result(x, LIR_OprFact::value_type(x->type()));
}
}
void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction already assigns the result operand as a side
  // effect, so nothing more is needed here.
  operand_for_instruction(x);
}
// TODO could try to substitute this node with an equivalent InstanceOf // if clazz is known to be a constant Class. This will pick up newly found // constants after HIR construction. I'll leave this to a future change.
// as a first cut, make a simple leaf call to runtime to stay platform independent. // could follow the aastore example in a future change.
LIRItem receiver(x->argument_at(0), this);
receiver.load_item();
LIR_Opr result = rlock_result(x);
CodeEmitInfo* info = NULL; if (x->needs_null_check()) {
info = state_for(x);
}
// While reading off the universal constant mirror is less efficient than doing // another branch and returning the constant answer, this branchless code runs into // much less risk of confusion for C1 register allocator. The choice of the universe // object here is correct as long as it returns the same modifiers we would expect // from the primitive class itself. See spec for Class.getModifiers that provides // the typed array klasses with similar modifiers as their component types.
// Check if this is a Java mirror of primitive type, and select the appropriate klass.
LIR_Opr klass = new_register(T_METADATA);
__ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
__ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);
// Get the answer.
__ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}
// Array case: size is round(header + element_size*arraylength). // Since arraylength is different for every array instance, we have to // compute the whole thing at runtime.
// Shift-left awkwardness. Normally it is just: // __ shift_left(length, layout, length); // But C1 cannot perform shift_left with non-constant count, so we end up // doing the per-bit loop dance here. x86_32 also does not know how to shift // longs, so we have to act on ints.
LabelObj* L_shift_loop = new LabelObj();
LabelObj* L_shift_exit = new LabelObj();
// Returns the LIR operand for instruction x, allocating it on first request.
LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    // No operand assigned yet.
    Constant* c = x->as_Constant();
    if (c != NULL) {
      // Constants are represented directly by their value type.
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}
// Block local constant handling. This code is useful for keeping // unpinned constants and constants which aren't exposed in the IR in // registers. Unpinned Constant instructions have their operands // cleared when the block is finished so that other blocks can't end // up referring to their registers.
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
BasicType t = c->type(); for (int i = 0; i < _constants.length(); i++) {
LIR_Const* other = _constants.at(i); if (t == other->type()) { switch (t) { case T_INT: case T_FLOAT: if (c->as_jint_bits() != other->as_jint_bits()) continue; break; case T_LONG: case T_DOUBLE: if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue; if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue; break; case T_OBJECT: if (c->as_jobject() != other->as_jobject()) continue; break; default: break;
} return _reg_for_constants.at(i);
}
}
// Comment copied form templateTable_i486.cpp // ---------------------------------------------------------------------------- // Volatile variables demand their effects be made known to all CPU's in // order. Store buffers on most chips allow reads & writes to reorder; the // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of // memory barrier (i.e., it's not sufficient that the interpreter does not // reorder volatile references, the hardware also must not reorder them). // // According to the new Java Memory Model (JMM): // (1) All volatiles are serialized wrt to each other. // ALSO reads & writes act as acquire & release, so: // (2) A read cannot let unrelated NON-volatile memory refs that happen after // the read float up to before the read. It's OK for non-volatile memory refs // that happen before the volatile read to float down below it. // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs // that happen BEFORE the write float down to after the write. It's OK for // non-volatile memory refs that happen after the volatile write to float up // before it. // // We only put in barriers around volatile refs (they are expensive), not // _between_ memory refs (that would require us to track the flavor of the // previous memory refs). Requirements (2) and (3) require some barriers // before volatile stores and after volatile loads. These nearly cover // requirement (1) but miss the volatile-store-volatile-load case. This final // case is placed after volatile-stores although it could just as well go // before volatile-loads.
if (is_volatile || needs_patching) { // load item if field is volatile (fewer special cases for volatiles) // load item if field not initialized // load item if field not constant // because of code patching we cannot inline constants if (field_type == T_BYTE || field_type == T_BOOLEAN) {
value.load_byte_item();
} else {
value.load_item();
}
} else {
value.load_for_store(field_type);
}
set_no_result(x);
#ifndef PRODUCT if (PrintNotLoaded && needs_patching) {
tty->print_cr(" ###class not loaded at store_%s bci %d",
x->is_static() ? "static" : "field", x->printable_bci());
} #endif
if (x->needs_null_check() &&
(needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()))) { // Emit an explicit null check because the offset is too large. // If the class is not loaded and the object is NULL, we need to deoptimize to throw a // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
__ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
DecoratorSet decorators = IN_HEAP; if (is_volatile) {
decorators |= MO_SEQ_CST;
} if (needs_patching) {
decorators |= C1_NEEDS_PATCHING;
}
access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
}
// the CodeEmitInfo must be duplicated for each different // LIR-instruction because spilling can occur anywhere between two // instructions and so the debug information must be different
CodeEmitInfo* range_check_info = state_for(x);
CodeEmitInfo* null_check_info = NULL; if (x->needs_null_check()) {
null_check_info = new CodeEmitInfo(range_check_info);
}
if (GenerateRangeChecks && needs_range_check) { if (use_length) {
__ cmp(lir_cond_belowEqual, length.result(), index.result());
__ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
} else {
array_range_check(array.result(), index.result(), null_check_info, range_check_info); // range_check also does the null check
null_check_info = NULL;
}
}
if (GenerateArrayStoreCheck && needs_store_check) {
CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
}
CodeEmitInfo* info = NULL; if (needs_patching) {
assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
info = state_for(x, x->state_before());
} elseif (x->needs_null_check()) {
NullCheck* nc = x->explicit_null_check(); if (nc == NULL) {
info = state_for(x);
} else {
info = state_for(nc);
}
}
LIRItem object(x->obj(), this);
object.load_item();
#ifndef PRODUCT if (PrintNotLoaded && needs_patching) {
tty->print_cr(" ###class not loaded at load_%s bci %d",
x->is_static() ? "static" : "field", x->printable_bci());
} #endif
bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception(); if (x->needs_null_check() &&
(needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()) ||
stress_deopt)) {
LIR_Opr obj = object.result(); if (stress_deopt) {
obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
} // Emit an explicit null check because the offset is too large. // If the class is not loaded and the object is NULL, we need to deoptimize to throw a // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
__ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
DecoratorSet decorators = IN_HEAP; if (is_volatile) {
decorators |= MO_SEQ_CST;
} if (needs_patching) {
decorators |= C1_NEEDS_PATCHING;
}
LIR_Opr result = rlock_result(x, field_type);
access_load_at(decorators, field_type,
object, LIR_OprFact::intConst(x->offset()), result,
info ? new CodeEmitInfo(info) : NULL, info);
}
// NOTE(review): the following lines are residual boilerplate from the web page
// this file was extracted from (a German disclaimer, not source code).  They
// are kept verbatim but commented out so the file remains compilable:
// Die Informationen auf dieser Webseite wurden
// nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereit gestellten Informationen zugesichert.
// Bemerkung:
// Die farbliche Syntaxdarstellung ist noch experimentell.