/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// Forward declarations for the C2 IR node hierarchy and supporting
// compiler phases/utilities, one per line for readable diffs.
class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class ArrayCopyNode;
class BaseCountedLoopNode;
class BaseCountedLoopEndNode;
class BlackholeNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CastFFNode;
class CastDDNode;
class CastVVNode;
class CastIINode;
class CastLLNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CompareAndSwapNode;
class CompareAndExchangeNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class HaltNode;
class IfNode;
class IfProjNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LoadStoreConditionalNode;
class LockNode;
class LongCountedLoopNode;
class LongCountedLoopEndNode;
class LoopNode;
class LShiftNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachJumpNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class MachMemBarNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MoveNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Opaque1Node;
class OuterStripMinedLoopNode;
class OuterStripMinedLoopEndNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class PopulateIndexNode;
class ProjNode;
class RangeCheckNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class SubTypeCheckNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class LoadVectorMaskedNode;
class StoreVectorMaskedNode;
class LoadVectorGatherNode;
class StoreVectorNode;
class StoreVectorScatterNode;
class VectorMaskCmpNode;
class VectorUnboxNode;
class VectorSet;
class VectorReinterpretNode;
class ShiftVNode;
class ExpandVNode;
class CompressVNode;
class CompressMNode;
#if OPTO_DU_ITERATOR_ASSERT class DUIterator; class DUIterator_Fast; class DUIterator_Last; #else typedef uint DUIterator; typedef Node** DUIterator_Fast; typedef Node** DUIterator_Last; #endif
// Fix: both #define lines were swallowed by the preceding // comment on the
// same physical line, so neither macro was ever defined.

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)
//------------------------------Node------------------------------------------- // Nodes define actions in the program. They create values, which have types. // They are both vertices in a directed graph and program primitives. Nodes // are labeled; the label is the "opcode", the primitive function in the lambda // calculus sense that gives meaning to the Node. Node inputs are ordered (so // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to // the Node's function. These inputs also define a Type equation for the Node. // Solving these Type equations amounts to doing dataflow analysis. // Control and data are uniformly represented in the graph. Finally, Nodes // have a unique dense integer index which is used to index into side arrays // whenever I have phase-specific information.
class Node { friendclass VMStructs;
// Lots of restrictions on cloning Nodes
NONCOPYABLE(Node);
// Because Nodes come and go, I define an Arena of Node structures to pull // from. This should allow fast access to node creation & deletion. This // field is a local cache of a value defined in some "program fragment" for // which these Nodes are just a part of.
inlinevoid* operatornew(size_t x) throw() {
Compile* C = Compile::current();
Node* n = (Node*)C->node_arena()->AmallocWords(x); return (void*)n;
}
// Delete is a NOP voidoperatordelete( void *ptr ) {} // Fancy destructor; eagerly attempt to reclaim Node numberings and storage void destruct(PhaseValues* phase);
// Create a new Node. Required is the number is of inputs required for // semantic correctness.
Node( uint required );
// Create a new Node with given input edges. // This version requires use of the "edge-count" new. // E.g. new (C,3) FooNode( C, NULL, left, right );
Node( Node *n0 );
Node( Node *n0, Node *n1 );
Node( Node *n0, Node *n1, Node *n2 );
Node( Node *n0, Node *n1, Node *n2, Node *n3 );
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
Node( Node *n0, Node *n1, Node *n2, Node *n3,
Node *n4, Node *n5, Node *n6 );
// Clone an inherited Node given only the base Node type.
Node* clone() const;
// Clone a Node, immediately supplying one or two new edges. // The first and second arguments, if non-null, replace in(1) and in(2), // respectively.
Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
Node* nn = clone(); if (in1 != NULL) nn->set_req(1, in1); if (in2 != NULL) nn->set_req(2, in2); return nn;
}
private: // Shared setup for the above constructors. // Handles all interactions with Compile::current. // Puts initial values in all Node fields except _idx. // Returns the initial value for _idx, which cannot // be initialized by assignment. inlineint Init(int req);
//----------------- input edge handling protected: friendclass PhaseCFG; // Access to address of _in array elements
Node **_in; // Array of use-def references to Nodes
Node **_out; // Array of def-use references to Nodes
// Input edges are split into two categories. Required edges are required // for semantic correctness; order is important and NULLs are allowed. // Precedence edges are used to help determine execution order and are // added, e.g., for scheduling purposes. They are unordered and not // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 // are required, from _cnt to _max-1 are precedence edges.
node_idx_t _cnt; // Total number of required Node inputs.
node_idx_t _max; // Actual length of input array.
// Output edges are an unordered list of def-use edges which exactly // correspond to required input edges which point from other nodes // to this one. Thus the count of the output edges is the number of // users of this node.
node_idx_t _outcnt; // Total number of Node outputs.
node_idx_t _outmax; // Actual length of output array.
// Grow the actual input array to the next larger power-of-2 bigger than len. void grow( uint len ); // Grow the output array to the next larger power-of-2 bigger than len. void out_grow( uint len );
public: // Each Node is assigned a unique small/dense number. This number is used // to index into auxiliary arrays of data and bit vectors. // The field _idx is declared constant to defend against inadvertent assignments, // since it is used by clients as a naked field. However, the field's value can be // changed using the set_idx() method. // // The PhaseRenumberLive phase renumbers nodes based on liveness information. // Therefore, it updates the value of the _idx field. The parse-time _idx is // preserved in _parse_idx. const node_idx_t _idx;
DEBUG_ONLY(const node_idx_t _parse_idx;) // IGV node identifier. Two nodes, possibly in different compilation phases, // have the same IGV identifier if (and only if) they are the very same node // (same memory address) or one is "derived" from the other (by e.g. // renumbering or matching). This identifier makes it possible to follow the // entire lifetime of a node in IGV even if its C2 identifier (_idx) changes.
NOT_PRODUCT(node_idx_t _igv_idx;)
// Get the (read-only) number of input edges
uint req() const { return _cnt; }
uint len() const { return _max; } // Get the (read-only) number of output edges
uint outcnt() const { return _outcnt; }
#if OPTO_DU_ITERATOR_ASSERT // Iterate over the out-edges of this node. Deletions are illegal. inline DUIterator outs() const; // Use this when the out array might have changed to suppress asserts. inline DUIterator& refresh_out_pos(DUIterator& i) const; // Does the node have an out at this position? (Used for iteration.) inlinebool has_out(DUIterator& i) const; inline Node* out(DUIterator& i) const; // Iterate over the out-edges of this node. All changes are illegal. inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; inline Node* fast_out(DUIterator_Fast& i) const; // Iterate over the out-edges of this node, deleting one at a time. inline DUIterator_Last last_outs(DUIterator_Last& min) const; inline Node* last_out(DUIterator_Last& i) const; // The inline bodies of all these methods are after the iterator definitions. #else // Iterate over the out-edges of this node. Deletions are illegal. // This iteration uses integral indexes, to decouple from array reallocations.
DUIterator outs() const { return 0; } // Use this when the out array might have changed to suppress asserts.
DUIterator refresh_out_pos(DUIterator i) const { return i; }
// Reference to the i'th output Node. Error if out of bounds.
Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } // Does the node have an out at this position? (Used for iteration.) bool has_out(DUIterator i) const { return i < _outcnt; }
// Iterate over the out-edges of this node. All changes are illegal. // This iteration uses a pointer internal to the out array.
DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
Node** out = _out; // Assign a limit pointer to the reference argument:
max = out + (ptrdiff_t)_outcnt; // Return the base pointer: return out;
}
Node* fast_out(DUIterator_Fast i) const { return *i; } // Iterate over the out-edges of this node, deleting one at a time. // This iteration uses a pointer internal to the out array.
DUIterator_Last last_outs(DUIterator_Last& min) const {
Node** out = _out; // Assign a limit pointer to the reference argument:
min = out; // Return the pointer to the start of the iteration: return out + (ptrdiff_t)_outcnt - 1;
}
Node* last_out(DUIterator_Last i) const { return *i; } #endif
// Reference to the i'th input Node. Error if out of bounds.
Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; } // Reference to the i'th input Node. NULL if out of bounds.
Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); } // Reference to the i'th output Node. Error if out of bounds. // Use this accessor sparingly. We are going trying to use iterators instead.
Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } // Return the unique out edge.
Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } // Delete out edge at position 'i' by moving last out edge to position 'i' void raw_del_out(uint i) {
assert(i < _outcnt,"oob");
assert(_outcnt > 0,"oob"); #if OPTO_DU_ITERATOR_ASSERT // Record that a change happened here.
debug_only(_last_del = _out[i]; ++_del_tick); #endif
_out[i] = _out[--_outcnt]; // Smash the old edge so it can't be used accidentally.
debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
}
// Set a required input edge, also updates corresponding output edge void add_req( Node *n ); // Append a NEW required input void add_req( Node *n0, Node *n1 ) {
add_req(n0); add_req(n1); } void add_req( Node *n0, Node *n1, Node *n2 ) {
add_req(n0); add_req(n1); add_req(n2); } void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). void del_req( uint idx ); // Delete required edge & compact void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order void ins_req( uint i, Node *n ); // Insert a NEW required input void set_req( uint i, Node *n ) {
assert( is_not_dead(n), "can not use dead node");
assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
assert( !VerifyHashTableKeys || _hash_lock == 0, "remove node from hash table before modifying it");
Node** p = &_in[i]; // cache this._in, across the del_out call if (*p != NULL) (*p)->del_out((Node *)this);
(*p) = n; if (n != NULL) n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
} // Light version of set_req() to init inputs after node creation. void init_req( uint i, Node *n ) {
assert( i == 0 && this == n ||
is_not_dead(n), "can not use dead node");
assert( i < _cnt, "oob");
assert( !VerifyHashTableKeys || _hash_lock == 0, "remove node from hash table before modifying it");
assert( _in[i] == NULL, "sanity");
_in[i] = n; if (n != NULL) n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
} // Find first occurrence of n among my edges: int find_edge(Node* n); int find_prec_edge(Node* n) { for (uint i = req(); i < len(); i++) { if (_in[i] == n) return i; if (_in[i] == NULL) {
DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); ) break;
}
} return -1;
} int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL); int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn); // NULL out all inputs to eliminate incoming Def-Use edges. void disconnect_inputs(Compile* C);
// Quickly, return true if and only if I am Compile::current()->top(). bool is_top() const {
assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); return (_out == NULL);
} // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) void setup_is_top();
// Strip away casting. (It is depth-limited.)
Node* uncast(bool keep_deps = false) const; // Return whether two Nodes are equivalent, after stripping casting. bool eqv_uncast(const Node* n, bool keep_deps = false) const { return (this->uncast(keep_deps) == n->uncast(keep_deps));
}
// Find out of current node that matches opcode.
Node* find_out_with(int opcode); // Return true if the current node has an out that matches opcode. bool has_out_with(int opcode); // Return true if the current node has an out that matches any of the opcodes. bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
private: static Node* uncast_helper(const Node* n, bool keep_deps);
// Add an output edge to the end of the list void add_out( Node *n ) { if (is_top()) return; if( _outcnt == _outmax ) out_grow(_outcnt);
_out[_outcnt++] = n;
} // Delete an output edge void del_out( Node *n ) { if (is_top()) return;
Node** outp = &_out[_outcnt]; // Find and remove n do {
assert(outp > _out, "Missing Def-Use edge");
} while (*--outp != n);
*outp = _out[--_outcnt]; // Smash the old edge so it can't be used accidentally.
debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); // Record that a change happened here. #if OPTO_DU_ITERATOR_ASSERT
debug_only(_last_del = n; ++_del_tick); #endif
} // Close gap after removing edge. void close_prec_gap_at(uint gap) {
assert(_cnt <= gap && gap < _max, "no valid prec edge");
uint i = gap;
Node *last = NULL; for (; i < _max-1; ++i) {
Node *next = _in[i+1]; if (next == NULL) break;
last = next;
}
_in[gap] = last; // Move last slot to empty one.
_in[i] = NULL; // NULL out last slot.
}
public: // Globally replace this node by a given new node, updating all uses. void replace_by(Node* new_node); // Globally replace this node by a given new node, updating all uses // and cutting input edges of old node. void subsume_by(Node* new_node, Compile* c) {
replace_by(new_node);
disconnect_inputs(c);
} void set_req_X(uint i, Node *n, PhaseIterGVN *igvn); void set_req_X(uint i, Node *n, PhaseGVN *gvn); // Find the one non-null required input. RegionNode only
Node *nonnull_req() const; // Add or remove precedence edges void add_prec( Node *n ); void rm_prec( uint i );
// Note: prec(i) will not necessarily point to n if edge already exists. void set_prec( uint i, Node *n ) {
assert(i < _max, "oob: i=%d, _max=%d", i, _max);
assert(is_not_dead(n), "can not use dead node");
assert(i >= _cnt, "not a precedence edge"); // Avoid spec violation: duplicated prec edge. if (_in[i] == n) return; if (n == NULL || find_prec_edge(n) != -1) {
rm_prec(i); return;
} if (_in[i] != NULL) _in[i]->del_out((Node *)this);
_in[i] = n;
n->add_out((Node *)this);
Compile::current()->record_modified_node(this);
}
// Set this node's index, used by cisc_version to replace current node void set_idx(uint new_idx) { const node_idx_t* ref = &_idx;
*(node_idx_t*)ref = new_idx;
} // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) void swap_edges(uint i1, uint i2) {
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); // Def-Use info is unchanged
Node* n1 = in(i1);
Node* n2 = in(i2);
_in[i1] = n2;
_in[i2] = n1; // If this node is in the hash table, make sure it doesn't need a rehash.
assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
}
// NOTE(review): this region is whitespace-mangled by extraction: keywords are
// fused ("virtualbool", "staticconst", "returnfalse", ...) and // comments on
// collapsed lines swallow the code that originally followed them. It is left
// byte-identical here because the NodeClasses enum below is visibly truncated
// (it is never closed, and the DEFINE_CLASS_ID entries plus the _class_id /
// _flags / Flag_* declarations referenced later are missing) — restore this
// whole section from the upstream OpenJDK node.hpp rather than hand-repairing.
// Iterators over input Nodes for a Node X are written as: // for( i = 0; i < X.req(); i++ ) ... X[i] ... // NOTE: Required edges can contain embedded NULL pointers.
//----------------- Other Node Properties
// Generate class IDs for (some) ideal nodes so that it is possible to determine // the type of a node using a non-virtual method call (the method is_<Node>() below). // // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines // the type of the node the ID represents; another subset of an ID's bits are reserved // for the superclasses of the node represented by the ID. // // By design, if A is a supertype of B, A.is_B() returns true and B.is_A() // returns false. A.is_A() returns true. // // If two classes, A and B, have the same superclass, a different bit of A's class id // is reserved for A's type than for B's type. That bit is specified by the third // parameter in the macro DEFINE_CLASS_ID. // // By convention, classes with deeper hierarchy are declared first. Moreover, // classes with the same hierarchy depth are sorted by usage frequency. // // The query method masks the bits to cut off bits of subclasses and then compares // the result with the class id (see the macro DEFINE_CLASS_QUERY below). // // Class_MachCall=30, ClassMask_MachCall=31 // 12 8 4 0 // 0 0 0 0 0 0 0 0 1 1 1 1 0 // | | | | // | | | Bit_Mach=2 // | | Bit_MachReturn=4 // | Bit_MachSafePoint=8 // Bit_MachCall=16 // // Class_CountedLoop=56, ClassMask_CountedLoop=63 // 12 8 4 0 // 0 0 0 0 0 0 0 1 1 1 0 0 0 // | | | // | | Bit_Region=8 // | Bit_Loop=16 // Bit_CountedLoop=32
// NOTE(review): enum body truncated below — no closing brace is visible and
// the jump straight into is_Con() confirms missing source between these lines.
// This enum is used only for C2 ideal and mach nodes with is_<node>() methods // so that its values fit into 32 bits. enum NodeClasses {
Bit_Node = 0x00000000,
Class_Node = 0x00000000,
ClassMask_Node = 0xFFFFFFFF,
bool is_Con () const { return (_flags & Flag_is_Con) != 0; } // The data node which is safe to leave in dead loop during IGVN optimization. bool is_dead_loop_safe() const;
// is_Copy() returns copied edge index (0 or 1)
uint is_Copy() const { return (_flags & Flag_is_Copy); }
virtualbool is_CFG() const { returnfalse; }
// If this node is control-dependent on a test, can it be // rerouted to a dominating equivalent test? This is usually // true of non-CFG nodes, but can be false for operations which // depend for their correct sequencing on more than one test. // (In that case, hoisting to a dominating test may silently // skip some other important test.) virtualbool depends_only_on_test() const { assert(!is_CFG(), ""); returntrue; };
// When building basic blocks, I need to have a notion of block beginning // Nodes, next block selector Nodes (block enders), and next block // projections. These calls need to work on their machine equivalents. The // Ideal beginning Nodes are RootNode, RegionNode and StartNode. bool is_block_start() const { if ( is_Region() ) returnthis == (const Node*)in(0); else return is_Start();
}
// The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, // Goto and Return. This call also returns the block ending Node. virtualconst Node *is_block_proj() const;
// The node is a "macro" node which needs to be expanded before matching bool is_macro() const { return (_flags & Flag_is_macro) != 0; } // The node is expensive: the best control is set during loop opts bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
// An arithmetic node which accumulates a data in a loop. // It must have the loop's phi as input and provide a def to the phi. bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; }
// Get the worst-case Type output for this Node. virtualconstclass Type *bottom_type() const;
// If we find a better type for a node, try to record it permanently. // Return true if this node actually changed. // Be sure to do the hash_delete game in the "rehash" variant. void raise_bottom_type(const Type* new_type);
// Get the address type with which this node uses and/or defs memory, // or NULL if none. The address type is conservatively wide. // Returns non-null for calls, membars, loads, stores, etc. // Returns TypePtr::BOTTOM if the node touches memory "broadly". virtualconstclass TypePtr *adr_type() const { return NULL; }
// Return an existing node which computes the same function as this node. // The optimistic combined algorithm requires this to return a Node which // is a small number of steps away (e.g., one of my inputs). virtual Node* Identity(PhaseGVN* phase);
// Return the set of values this Node can take on at runtime. virtualconst Type* Value(PhaseGVN* phase) const;
// Return a node which is more "ideal" than the current node. // The invariants on this call are subtle. If in doubt, read the // treatise in node.cpp above the default implementation AND TEST WITH // +VerifyIterativeGVN! virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Some nodes have specific Ideal subgraph transformations only if they are // unique users of specific nodes. Such nodes should be put on IGVN worklist // for the transformations to happen. bool has_special_unique_user() const;
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
Node* find_exact_control(Node* ctrl);
// Check if 'this' node dominates or equal to 'sub'. bool dominates(Node* sub, Node_List &nlist);
// See if there is valid pipeline info staticconst Pipeline *pipeline_class(); virtualconst Pipeline *pipeline() const;
// Compute the latency from the def to this instruction of the ith input node
uint latency(uint i);
// Hash & compare functions, for pessimistic value numbering
// If the hash function returns the special sentinel value NO_HASH, // the node is guaranteed never to compare equal to any other node. // If we accidentally generate a hash with value NO_HASH the node // won't go into the table and we'll lose a little optimization. staticconst uint NO_HASH = 0; virtual uint hash() const; virtualbool cmp( const Node &n ) const;
// Operation appears to be iteratively computed (such as an induction variable) // It is possible for this operation to return false for a loop-varying // value, if it appears (by local graph inspection) to be computed by a simple conditional. bool is_iteratively_computed();
// Determine if a node is a counted loop induction variable. // NOTE: The method is defined in "loopnode.cpp". bool is_cloop_ind_var() const;
// Return a node with opcode "opc" and same inputs as "this" if one can // be found; Otherwise return NULL;
Node* find_similar(int opc);
// Return the unique control out if only one. Null if none or more than one.
Node* unique_ctrl_out_or_null() const; // Return the unique control out. Asserts if none or more than one control out.
Node* unique_ctrl_out() const;
// Set control or add control as precedence edge void ensure_control_or_add_prec(Node* c);
//----------------- Code Generation
// Ideal register class for Matching. Zero means unmatched instruction // (these are cloned instead of converted to machine nodes). virtual uint ideal_reg() const;
staticconst uint NotAMachineReg; // must be > max. machine register
// Do we Match on this edge index or not? Generally false for Control // and true for everything else. Weird for calls & returns. virtual uint match_edge(uint idx) const;
// Register class output is returned in virtualconst RegMask &out_RegMask() const; // Register class input is expected in virtualconst RegMask &in_RegMask(uint) const; // Should we clone rather than spill this instruction? bool rematerialize() const;
// Return JVM State Object if this Node carries debug info, or NULL otherwise virtual JVMState* jvms() const;
// Print as assembly virtualvoid format( PhaseRegAlloc *, outputStream* st = tty ) const; // Emit bytes starting at parameter 'ptr' // Bump 'ptr' by the number of output bytes virtualvoid emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; // Size of instruction in bytes virtual uint size(PhaseRegAlloc *ra_) const;
// Convenience function to extract an integer constant from a node. // If it is not an integer constant (either Con, CastII, or Mach), // return value_if_unknown.
jint find_int_con(jint value_if_unknown) const { const TypeInt* t = find_int_type(); return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
} // Return the constant, knowing it is an integer constant already
jint get_int() const { const TypeInt* t = find_int_type();
guarantee(t != NULL, "must be con"); return t->get_con();
} // Here's where the work is done. Can produce non-constant int types too. const TypeInt* find_int_type() const; const TypeInteger* find_integer_type(BasicType bt) const;
// Same thing for long (and intptr_t, via type.hpp):
jlong get_long() const { const TypeLong* t = find_long_type();
guarantee(t != NULL, "must be con"); return t->get_con();
}
jlong find_long_con(jint value_if_unknown) const { const TypeLong* t = find_long_type(); return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
} const TypeLong* find_long_type() const;
// These guys are called by code generated by ADLC:
intptr_t get_ptr() const;
intptr_t get_narrowcon() const;
jdouble getd() const;
jfloat getf() const;
// Nodes which are pinned into basic blocks virtualbool pinned() const { returnfalse; }
// Nodes which use memory without consuming it, hence need antidependences // More specifically, needs_anti_dependence_check returns true iff the node // (a) does a load, and (b) does not perform a store (except perhaps to a // stack slot or some other unaliased location). bool needs_anti_dependence_check() const;
// Return which operand this instruction may cisc-spill. In other words, // return operand position that can convert from reg to memory access virtualint cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
// Whether this is a memory-writing machine node. bool is_memory_writer() const { return is_Mach() && bottom_type()->has_memory(); }
//----------------- Printing, etc #ifndef PRODUCT public:
Node* find(int idx, bool only_ctrl = false); // Search the graph for the given idx.
Node* find_ctrl(int idx); // Search control ancestors for the given idx. void dump_bfs(constint max_distance, Node* target, constchar* options) const; // Print BFS traversal void dump_bfs(constint max_distance) const; // dump_bfs(max_distance, nullptr, nullptr) class DumpConfig { public: // overridden to implement coloring of node idx virtualvoid pre_dump(outputStream *st, const Node* n) = 0; virtualvoid post_dump(outputStream *st) = 0;
}; void dump_idx(bool align = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; void dump_name(outputStream* st = tty, DumpConfig* dc = nullptr) const; void dump() const; // print node with newline void dump(constchar* suffix, bool mark = false, outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print this node. void dump(int depth) const; // Print this node, recursively to depth d void dump_ctrl(int depth) const; // Print control nodes, to depth d void dump_comp() const; // Print this node in compact representation. // Print this node in compact representation. void dump_comp(constchar* suffix, outputStream *st = tty) const; private: virtualvoid dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print required-edge info virtualvoid dump_prec(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print precedence-edge info virtualvoid dump_out(outputStream* st = tty, DumpConfig* dc = nullptr) const; // Print the output edge info public: virtualvoid dump_spec(outputStream *st) const {}; // Print per-node info // Print compact per-node info virtualvoid dump_compact_spec(outputStream *st) const { dump_spec(st); }
// This call defines a class-unique string used to identify class instances virtualconstchar *Name() const;
void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) staticbool in_dump() { return Compile::current()->_in_dump_cnt > 0; } // check if we are in a dump call #endif #ifdef ASSERT void verify_construction(); bool verify_jvms(const JVMState* jvms) const;
Node* _debug_orig; // Original version of this, if any.
Node* debug_orig() const { return _debug_orig; } void set_debug_orig(Node* orig); // _debug_orig = orig void dump_orig(outputStream *st, bool print_key = true) const;
int _debug_idx; // Unique value assigned to every node. int debug_idx() const { return _debug_idx; } void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }
int _hash_lock; // Barrier to modifications of nodes in the hash table void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }
staticvoid init_NodeProperty();
#if OPTO_DU_ITERATOR_ASSERT const Node* _last_del; // The last deleted node.
uint _del_tick; // Bumped when a deletion happens.. #endif #endif
};
inlinebool not_a_node(const Node* n) { if (n == NULL) returntrue; if (((intptr_t)n & 1) != 0) returntrue; // uninitialized, etc. if (*(address*)n == badAddress) returntrue; // kill by Node::destruct returnfalse;
}
//----------------------------------------------------------------------------- // Iterators over DU info, and associated Node functions.
#if OPTO_DU_ITERATOR_ASSERT
// Common code for assertion checking on DU iterators.
// In debug builds this base class caches a snapshot of the iterated node's
// out-array state so the iterator can detect structural modification; in
// product builds only the (empty) I_VDUI_ONLY macro remains.
class DUIterator_Common {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};
#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)
// Default DU iterator. Allows appends onto the out array. // Allows deletion from the out array only at the current point. // Usage: // for (DUIterator i = x->outs(); x->has_out(i); i++) { // Node* y = x->out(i); // ... // } // Compiles in product mode to a unsigned integer index, which indexes // onto a repeatedly reloaded base pointer of x->_out. The loop predicate // also reloads x->_outcnt. If you delete, you must perform "--i" just // before continuing the loop. You must delete only the last-produced // edge. You must delete only a single copy of the last-produced edge, // or else you must delete all copies at once (the first time the edge // is produced by the iterator). class DUIterator : public DUIterator_Common { friendclass Node;
// NOTE(review): this fragment appears truncated by the extraction -- the
// public operators and the closing brace of class DUIterator are not
// visible; only the product-mode index and the ASSERT-only bookkeeping
// members appear below.
// This is the index which provides the product-mode behavior. // Whatever the product-mode version of the system does to the // DUI index is done to this index. All other fields in // this class are used only for assertion checking.
uint _idx;
#ifdef ASSERT
uint _refresh_tick; // Records the refresh activity.
// Debug-only verification hooks, compiled in under ASSERT only:
void sample(const Node* node); // Initialize _refresh_tick etc. void verify(const Node* node, bool at_end_ok = false); void verify_increment(); // Verify an increment operation. void verify_resync(); // Verify that we can back up over a deletion. void verify_finish(); // Verify that the loop terminated properly. void refresh(); // Resample verification info. void reset(const DUIterator& that); // Resample after assignment. #endif
// Faster DU iterator. Disallows insertions into the out array. // Allows deletion from the out array only at the current point. // Usage: // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { // Node* y = x->fast_out(i); // ... // } // Compiles in product mode to raw Node** pointer arithmetic, with // no reloading of pointers from the original node x. If you delete, // you must perform "--i; --imax" just before continuing the loop. // If you delete multiple copies of the same edge, you must decrement // imax, but not i, multiple times: "--i, imax -= num_edges". class DUIterator_Fast : public DUIterator_Common { friendclass Node; friendclass DUIterator_Last;
// NOTE(review): the public operators and the closing brace of
// DUIterator_Fast are not visible in this fragment (lost in extraction).
// This is the pointer which provides the product-mode behavior. // Whatever the product-mode version of the system does to the // DUI pointer is done to this pointer. All other fields in // this class are used only for assertion checking.
Node** _outp;
// Note: offset must be signed, since -1 is sometimes passed
// Private c'tor (reachable from friend Node): position the raw cursor
// 'offset' slots into the node's _out array; in debug builds also snapshot
// the node's state via sample() for later verification.
DUIterator_Fast(const Node* node, ptrdiff_t offset)
{ _outp = node->_out + offset; debug_only(sample(node)); }
public: // initialize to garbage; clear _vdui to disable asserts
// Default c'tor intentionally leaves _outp uninitialized; asserts stay
// disabled (_vdui = false) until the iterator is assigned a real value.
DUIterator_Fast()
{ /*initialize to garbage*/ debug_only(_vdui = false); }
// Begin a fast DU iteration over this node's out-edges.  The one-past-the-
// end limit iterator is written through 'imax'; the returned iterator
// points at the first out-edge.  Typical loop:
//   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) ...
DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Hand the limit back through the reference argument ...
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // ... and start the iteration at offset zero of the _out array.
  return DUIterator_Fast(this, 0);
}
// Dereference a fast DU iterator: fetch the use-node it currently points
// at.  In debug builds this also verifies the iterator against this node
// and records the produced value (for later deletion checks).
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
// Faster DU iterator. Requires each successive edge to be removed. // Does not allow insertion of any edges. // Usage: // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { // Node* y = x->last_out(i); // ... // } // Compiles in product mode to raw Node** pointer arithmetic, with // no reloading of pointers from the original node x. class DUIterator_Last : private DUIterator_Fast { friendclass Node;
// NOTE(review): the remainder of class DUIterator_Last (constructors,
// operators, closing brace) is not visible in this fragment.
// Begin a backwards DU iteration over this node's out-edges.  The lower
// limit is written through 'imin'; the returned iterator points at the
// last occupied out-edge slot.  Typical loop:
//   for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) ...
DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // The lower bound is offset zero of the _out array ...
  imin = DUIterator_Last(this, 0);
  // ... and the walk starts at the final occupied slot, moving downward.
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
// Dereference a backwards DU iterator: fetch the use-node it currently
// points at.  In debug builds this also verifies the iterator against this
// node and records the produced value (for later deletion checks).
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
#endif//OPTO_DU_ITERATOR_ASSERT
#undef I_VDUI_ONLY #undef VDUI_ONLY
// An Iterator that truly follows the iterator pattern. Doesn't // support deletion but could be made to. // // for (SimpleDUIterator i(n); i.has_next(); i.next()) { // Node* m = i.get(); // class SimpleDUIterator : public StackObj { private:
Node* node;
DUIterator_Fast i;
DUIterator_Fast imax; public:
SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} bool has_next() { return i < imax; } void next() { i++; }
Node* get() { return node->fast_out(i); }
};
//----------------------------------------------------------------------------- // Map dense integer indices to Nodes. Uses classic doubling-array trick. // Abstractly provides an infinite array of Node*'s, initialized to NULL. // Note that the constructor just zeros things, and since I use Arena // allocation I do not need a destructor to reclaim storage. class Node_Array : public AnyObj { friendclass VMStructs; protected:
// NOTE(review): the closing brace of class Node_Array (and any members
// after clear()) is not visible in this fragment (lost in extraction).
Arena* _a; // Arena to allocate in
uint _max; // current capacity of _nodes (grown on demand by grow())
Node** _nodes; void grow( uint i ); // Grow array node to fit public:
// Construct over arena 'a' with initial capacity 'max', zero-filled.
Node_Array(Arena* a, uint max = OptoNodeListSize) : _a(a), _max(max) {
_nodes = NEW_ARENA_ARRAY(a, Node*, max);
clear();
}
// Shallow copy: shares the underlying _nodes storage with 'na'.
Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
// Unchecked lookup: out-of-range indices simply read as "not mapped".
Node *operator[] ( uint i ) const// Lookup, or NULL for not mapped
{ return (i<_max) ? _nodes[i] : (Node*)NULL; }
// Checked lookup: asserts that 'i' is within the current capacity.
Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; }
Node** adr() { return _nodes; } // Extend the mapping: index i maps to Node *n. void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } void insert( uint i, Node *n ); void remove( uint i ); // Remove, preserving order // Clear all entries in _nodes to NULL but keep storage void clear() {
Copy::zero_to_bytes(_nodes, _max * sizeof(Node*));
}
//------------------------------Unique_Node_List------------------------------- class Unique_Node_List : public Node_List { friendclass VMStructs;
// NOTE(review): this fragment appears truncated -- the remaining members
// and the closing brace of Unique_Node_List are not visible here.
// Membership set: prevents a node from being pushed onto the list twice.
VectorSet _in_worklist;
uint _clock_index; // Index in list where to pop from next public:
// Default c'tor: _in_worklist is default-constructed (no arena given).
Unique_Node_List() : Node_List(), _clock_index(0) {}
// Arena c'tor: both the list and the membership set allocate in 'a'.
Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}
// Unique_Mixed_Node_List // unique: nodes are added only once // mixed: allow new and old nodes class Unique_Mixed_Node_List : public ResourceObj { public:
// Hash-set keyed by node identity; cmpkey/hashkey are the comparator and
// hash functions supplied to the underlying dictionary.
Unique_Mixed_Node_List() : _visited_set(cmpkey, hashkey) {}
// Add 'node' to the worklist unless it is garbage or was already added.
void add(Node* node) { if (not_a_node(node)) { return; // Gracefully handle NULL, -1, 0xabababab, etc.
} if (_visited_set[node] == nullptr) {
_visited_set.Insert(node, node);
_worklist.push(node);
}
}
// NOTE(review): the two lines below look out of place for this class --
// the "Node_Stack" comment and find() presumably belong to a different
// class whose surrounding text was lost in extraction; verify upstream.
// Node_Stack is used to map nodes.
Node* find(uint idx) const;
};
//-----------------------------Node_Notes-------------------------------------- // Debugging or profiling annotations loosely and sparsely associated // with some nodes. See Compile::node_notes_at for the accessor. class Node_Notes { friendclass VMStructs;
// NOTE(review): class Node_Notes is cut off here -- the remainder of its
// body (and closing brace) is not part of this fragment.
// JVM state (bytecode position, inlining chain) attached to a node.
JVMState* _jvms;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.