/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {       // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
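// Illustrative example (values assumed, not tied to any particular platform):
// with _old_SP == 16 and out_preserve_stack_slots() == 2, an incoming stack
// argument in VMReg stack slot 3 warps to OptoReg 16 + 3 + 2 == 21.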
//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}
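// Illustrative example (values assumed): with fixed_slots() == 3,
// in_preserve_stack_slots() == 4 and a stack alignment of 2 slots, the old
// SP is stack2reg(align_up(7, 2)) == stack2reg(8).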
#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
ResourceMark rm;
Unique_Node_List worklist;
VectorSet visited;
  worklist.push(xroot);
  while (worklist.size() > 0) {
Node* n = worklist.pop();
visited.set(n->_idx);
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
worklist.push(in);
}
}
}
}
}
#endif
//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
uint sop = C->start()->Opcode();
OptoRegPair regs = return_value(ireg);
// And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
_return_value_mask.Insert(regs.second());
}
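  // (Illustration, platform-dependent: on x86_64 a long return value comes
  // back in RAX, so the mask above ends up covering both halves, RAX and
  // RAX_H.)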
  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
uint argcnt = domain->cnt() - TypeFunc::Parms;
BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
_parm_regs = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
_calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
}
  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() &&
          !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
}
VMReg parm_reg = vm_parm_regs[i].first();
assert(parm_reg->is_valid(), "invalid arg?"); if (parm_reg->is_reg()) {
OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
assert(can_be_java_arg(opto_parm_reg) ||
C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
opto_parm_reg == inline_cache_reg(), "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
assert(parm_reg != vm_parm_regs[j].first(), "calling conv. must produce distinct regs");
}
}
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
_old_SP = C->compute_old_SP();
assert( is_even(_old_SP), "must be even" );
  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
_in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() &&
        !vm_parm_regs[i].second()->is_valid() ) {
      _parm_regs[i].set_bad();
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocators point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
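  // E.g. (illustrative): with _in_arg_limit == 13 and SlotsPerLong == 2,
  // _new_SP becomes OptoReg::Name(14).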
_new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
_out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
assert( is_even(_out_arg_limit), "out_preserve must be even" );
  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
}
if (C->failing()) return; // bailed out on incoming arg failure
  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
find_shared( C->root() );
find_shared( C->top() );
C->print_method(PHASE_BEFORE_MATCHING, 1);
  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);
// Swap out to old-space; emptying new-space
Arena *old = C->node_arena()->move_contents(C->old_arena());
// Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
(C->comp_arena(), _old_node_note_array->length(),
0, NULL));
}
// Pre-size the new_node table to avoid the need for range checks.
grow_new_node_array(C->unique());
  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
C->set_unique(0);
C->reset_dead_node_list();
  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
_visited.clear();
  C->set_cached_top_node(xform( C->top(), live_nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
Matcher::soft_match_failure(); // recursive matching process failed
C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
assert(n->in(0) == C->root(), "should be control user");
n->set_req(0, xroot);
--j;
--jmax;
}
}
// Generate new mach node for ConP #NULL
assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");
    }
  }
// Now smoke old-space
NOT_DEBUG( old->destruct_contents() );
  // ------------------------
  // Set up save-on-entry registers.
Fixup_Save_On_Entry( );
{ // Cleanup mach IR after selection phase is over.
Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing()) return;
assert(verify_after_postselect_cleanup(), "");
}
}
//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

int Matcher::scalable_predicate_reg_slots() {
  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
         "scalable predicate vector should be supported");
  int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth of the size of
  // scalable vector register, one mask bit per vector byte.
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute number of slots which is required when scalable predicate
  // register is spilled.  E.g. if scalable vector register is 640 bits,
  // predicate register is 80 bits, which is 2.5 * slots.
  // We will round up the slot number to power of 2, which is required
  // by find_first_set().
  int slots = predicate_reg_bit_size & (BitsPerInt - 1)
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : predicate_reg_bit_size >> LogBitsPerInt;
  return round_up_power_of_2(slots);
}
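// Worked example (illustrative): a 512-bit scalable vector gives a 64-bit
// predicate register, i.e. 64 >> LogBitsPerInt == 2 slots exactly, and
// round_up_power_of_2(2) == 2.  The 640-bit case mentioned above needs
// 3 slots, which rounds up to 4.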
#define NOF_STACK_MASKS (3*13)
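// Three mask kinds (spill, debug, method-handle debug) for each of the 13
// ideal register classes that can be spilled (RegN/I/L/F/D/P, RegVectMask
// and VecA/S/D/X/Y/Z).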
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {
// Allocate storage for spill masks as masks for the appropriate load type.
RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);
  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
}
// At first, start with the empty mask
C->FIRST_STACK_mask().Clear();
// Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (OptoReg::Name i = init_in; i < _in_arg_limit; i = OptoReg::add(i, 1)) {
C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)), "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (OptoReg::Name i = init; RegMask::can_represent(i); i = OptoReg::add(i, 1)) {
C->FIRST_STACK_mask().Insert(i);
} // Finally, set the "infinite stack" bit.
C->FIRST_STACK_mask().set_AllStack();
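  // (The AllStack bit stands in for the unbounded tail of stack slots that a
  // fixed-size RegMask cannot enumerate bit-by-bit.)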
// Make spill masks. Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
aligned_stack_mask.clear_to_pairs();
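  // clear_to_pairs() drops any stack slot whose aligned partner is missing,
  // leaving only properly paired slots usable for long/double sized spills.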
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
RegMask scalable_stack_mask = aligned_stack_mask;
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
*idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecD] = RegMask::Empty;
}
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecX] = RegMask::Empty;
}
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecY] = RegMask::Empty;
}
  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
} else {
*idealreg2spillmask[Op_VecZ] = RegMask::Empty;
}
  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    if (Matcher::has_predicated_vectors()) {
      // Exclude last input arg stack slots to avoid spilling vector register there,
      // otherwise RegVectMask spills could stomp over stack slots in caller frame.
      for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
scalable_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
// For RegVectMask
scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
}
    // Exclude last input arg stack slots to avoid spilling vector register there,
    // otherwise vector spills could stomp over stack slots in caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
scalable_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
// For VecA
scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
*idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
} else {
*idealreg2spillmask[Op_VecA] = RegMask::Empty;
}
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64 bit moves, which
    // will kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
}
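  // (Rationale: OR-ing the FP register class into the integer spill mask, and
  // vice versa, lets the allocator satisfy a spill with a cross-class register
  // move instead of a memory round trip, on platforms where such moves are cheap.)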
  // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
*idealreg2debugmask [Op_RegN] = *idealreg2spillmask[Op_RegN];
*idealreg2debugmask [Op_RegI] = *idealreg2spillmask[Op_RegI];
*idealreg2debugmask [Op_RegL] = *idealreg2spillmask[Op_RegL];
*idealreg2debugmask [Op_RegF] = *idealreg2spillmask[Op_RegF];
*idealreg2debugmask [Op_RegD] = *idealreg2spillmask[Op_RegD];
*idealreg2debugmask [Op_RegP] = *idealreg2spillmask[Op_RegP];
*idealreg2debugmask [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];
}

//------------------------------Fixup_Save_On_Entry----------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
uint soe_cnt = number_of_saved_registers();
uint i;
// Find the procedure Start Node
StartNode *start = C->start();
assert( start, "Expect a start node" );
  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
ret_rms[TypeFunc::Parms+0] = _return_value_mask;
// Input RegMask array shared by all Rethrows.
uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
}
  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
}
}
  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
}
}
  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
}
}
  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {
// Add the save-on-entry to the mask array
ret_rms [ ret_edge_cnt] = mreg2regmask[i];
reth_rms [ reth_edge_cnt] = mreg2regmask[i];
tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
halt_rms [ halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];
Node *mproj;
      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
_register_save_type[i ] == Op_RegF &&
_register_save_type[i+1] == Op_RegF &&
is_save_on_entry(i+1) ) { // Add other bit for double
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
proj_cnt += 2; // Skip 2 for doubles
      } else if( (i&1) == 1 &&  // Else check for high half of double
_register_save_type[i-1] == Op_RegF &&
_register_save_type[i ] == Op_RegF &&
is_save_on_entry(i-1) ) {
ret_rms [ ret_edge_cnt] = RegMask::Empty;
reth_rms [ reth_edge_cnt] = RegMask::Empty;
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
halt_rms [ halt_edge_cnt] = RegMask::Empty;
mproj = C->top();
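        // The high half was already folded into the preceding double
        // projection, so it contributes no value of its own; top() keeps
        // the exit nodes' input counts consistent.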
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
_register_save_type[i ] == Op_RegI &&
_register_save_type[i+1] == Op_RegI &&
is_save_on_entry(i+1) ) { // Add other bit for long
ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
proj_cnt += 2; // Skip 2 for longs
      } else if( (i&1) == 1 &&  // Else check for high half of long
_register_save_type[i-1] == Op_RegI &&
_register_save_type[i ] == Op_RegI &&
is_save_on_entry(i-1) ) {
ret_rms [ ret_edge_cnt] = RegMask::Empty;
reth_rms [ reth_edge_cnt] = RegMask::Empty;
tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
halt_rms [ halt_edge_cnt] = RegMask::Empty;
mproj = C->top();
} else { // Make a projection for it off the Start
mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
}
      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
root->in(j)->add_req(mproj);
} // End of if a save-on-entry register
} // End of for all machine registers
}
//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init
OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif
// Start at OptoReg::stack0()
STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
STACK_ONLY_mask.set_AllStack();
  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
mreg2regmask[i].Insert(i);
    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A') {
caller_save_regmask.Insert(i);
mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
_register_save_policy[i] == 'E') {
caller_save_regmask_exclude_soe.Insert(i);
mh_caller_save_regmask_exclude_soe.Insert(i);
}
}
  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
mh_caller_save_regmask.OR(sp_save_mask);
mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);
// Grab the Frame Pointer
  Node *fp = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
set_shared(fp);
  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}
#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
nat = n1at;
nidx = C->get_alias_index(n1at);
}
}
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_CountPositives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
n->dump();
m->dump();
}
assert(C->subsume_loads() && C->must_alias(nat, midx), "must not lose alias info when matching");
}
}
#endif
//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
n = mstack.node(); // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
            if (n->is_MemBar()) {
m->as_MachMemBar()->set_adr_type(n->adr_type());
}
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {  // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
              NOT_PRODUCT(record_new2old(m, n);)
              if (m->in(0) != NULL) // m might be top
collect_null_checks(m, n);
} else { // Else just a regular 'ol guy
m = n->clone(); // So just clone into new-space
              NOT_PRODUCT(record_new2old(m, n);)
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
assert(m->outcnt() == 0, "no Uses of this clone yet");
}
}
        set_new_node(n, m); // Map old to new
        if (_old_node_note_array != NULL) {
Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
n->_idx);
C->set_node_notes_at(m->_idx, nn);
}
debug_only(match_alias_type(C, n, m));
}
n = m; // n is now a new-space node
mstack.set_node(n);
}
      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
mstack.push(m, Visit, n, -1);
}
      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == NULL || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
mstack.push(m, Visit, n, -1);
}
      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
) {
m = m->clone();
NOT_PRODUCT(record_new2old(m, n));
mstack.push(m, Post_Visit, n, i); // Don't need to visit
mstack.push(m->in(0), Visit, m, 0);
} else {
mstack.push(m, Visit, n, i);
}
}
      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
mstack.push(m, Visit, n, i);
}
    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
ShouldNotReachHere();
}
mstack.pop(); // remove processed node from stack
} else {
ShouldNotReachHere();
}
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
MachSafePointNode *msfpt = NULL;
MachCallNode *mcall = NULL;
  uint               cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod*        method = NULL;
  bool             is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
call = sfpt->as_Call();
domain = call->tf()->domain();
cnt = domain->cnt();
// Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }
// Copy data from the Ideal SafePoint to the machine version
mcall = m->as_MachCall();
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
msfpt->set_adr_type(sfpt->adr_type());
// Allocate a private array of RegMasks. These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
// Do all the pre-defined non-Empty register masks
msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
  // Place where the first outgoing argument can possibly be put.
OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
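  // (For example, the Win64 C calling convention reserves a register-parameter
  // backing area that the callee may freely clobber.)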
  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
call->calling_convention( sig_bt, parm_regs, argcnt );
#ifdef ASSERT
    // Sanity check users' calling convention.  Really handy during
    // the initial porting effort.  Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
!parm_regs[i].second()->is_valid() ) continue;
VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
!parm_regs[j].second()->is_valid() ) continue;
VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
assert( !reg4->is_valid(), "valid halvsies" );
} else {
assert( reg1 != reg2, "calling conv. must produce distinct regs");
assert( reg1 != reg3, "calling conv. must produce distinct regs");
assert( reg1 != reg4, "calling conv. must produce distinct regs");
assert( reg2 != reg3, "calling conv. must produce distinct regs");
assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
assert( reg3 != reg4, "calling conv. must produce distinct regs");
}
}
}
    }
#endif

    // Visit each argument.  Compute its outgoing register mask.
    // Return results now can have 2 bits returned.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
VMReg first = parm_regs[i].first();
      VMReg second = parm_regs[i].second();
      if(!first->is_valid() &&
         !second->is_valid()) {
        continue;               // Avoid Halves
      }
      // Handle case where arguments are in vector registers.
      if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
        assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
        for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
rm->Insert(r);
}
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
rm->Insert( reg2 );
} // End of for all arguments
}
  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
_out_arg_limit = out_arg_limit_per_call;
  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
C->record_method_not_compilable("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
proj->_rout.Insert(OptoReg::Name(i));
    }
    if (proj->_rout.is_NotEmpty()) {
push_projection(proj);
}
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
jvms->set_map(sfpt);
}
// Debug inputs begin just after the last incoming parameter
assert((mcall == NULL) || (mcall->jvms() == NULL) ||
(mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
  // Add additional edges.
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we can not add MachConstantBase in expand(), as the
    // ins are not complete then.
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) { // We added an edge before jvms, so we must adapt the position of the ins.
msfpt->jvms()->adapt_position(+1);
}
}
  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}
//------------------------------match_tree----------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the whole-sale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
assert( n->Opcode() != Op_Phi, "cannot match" );
assert( !n->is_block_start(), "cannot match" ); // Set the mark for all locally allocated State objects. // When this call returns, the _states_arena arena will be reset // freeing all State objects.
ResourceMark rm( &_states_arena );
LabelRootDepth = 0;
// StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
State *s = new (&_states_arena) State;
s->_kids[0] = NULL;
s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
Node* root_mem = mem;
  Label_Root(n, s, n->in(0), root_mem);
  if (C->failing()) return NULL;
// The minimum cost match for the whole tree is found at the root State
uint mincost = max_juint;
uint cost = max_juint;
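  // Note: 'mincost' will record the operand/rule index of the cheapest match
  // found by the loop below, while 'cost' holds that match's cost.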
  uint i;
  for (i = 0; i < NUM_OPERANDS; i++) {
    if (s->valid(i) &&             // valid entry and
s->cost(i) < cost && // low cost and
s->rule(i) >= NUM_OPERANDS) {// not an operand
mincost = i;
cost = s->cost(i);
}
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }

  // Reduce input tree based upon the state labels to machine Nodes
MachNode *m = ReduceInst(s, s->rule(mincost), mem); // New-to-old mapping is done in ReduceInst, to cover complex instructions.
NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
// Add any Matcher-ignored edges
uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
assert( mem == (Node*)1, "" );
start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
m->add_req( n->in(i) );
}
}
  debug_only( _mem_node = save_mem_node; )
  return m;
}
//------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or part of the current
// match tree.  Return true for requiring a register and false for matching
// as part of the current match tree.
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
const Type *t = m->bottom_type();
  if (t->singleton()) {
    // Never force constants into registers.  Allow them to match as
    // constants or registers.  Copies of the same value will share
    // the same register.  See find_shared_node.
    return false;
  } else {                      // Not a constant
    // Stop recursion if they have different Controls.
    Node* m_control = m->in(0);
    // Control of load's memory can post-dominate load's control.
    // So use it since load can't float above its memory.
    Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
    if (control && m_control && control != m_control && control != mem_control) {

      // Actually, we can live with the most conservative control we
      // find, if it post-dominates the others.  This allows us to
      // pick up load/op/store trees where the load can float a little
      // above the store.
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for (j=0; j<max_scan; j++) {
        if (x->is_Region())     // Bail out at merge points
          return true;
        x = x->in(0);
        if (x == m_control)     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
        if (x == mem_control)   // Does 'control' post-dominate