/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// Predefined classes
// (the forward declarations below had been swallowed into the comment by a
// line-join; restored as actual code)
class ciField;
class ValueStack;
class InstructionPrinter;
class IRScope;
// Instruction class hierarchy
//
// All leaf classes in the class hierarchy are concrete classes
// (i.e., are instantiated). All other classes are abstract and
// serve factoring.
// Forward declarations of all Instruction classes.
// Fixed fused keywords: classGoto/classIf/classSwitch/classReturn/classThrow
// were missing the space after 'class' and did not compile.
class Instruction;
class Phi;
class Local;
class Constant;
class AccessField;
class LoadField;
class StoreField;
class AccessArray;
class ArrayLength;
class AccessIndexed;
class LoadIndexed;
class StoreIndexed;
class NegateOp;
class Op2;
class ArithmeticOp;
class ShiftOp;
class LogicOp;
class CompareOp;
class IfOp;
class Convert;
class NullCheck;
class TypeCast;
class OsrEntry;
class ExceptionObject;
class StateSplit;
class Invoke;
class NewInstance;
class NewArray;
class NewTypeArray;
class NewObjectArray;
class NewMultiArray;
class TypeCheck;
class CheckCast;
class InstanceOf;
class AccessMonitor;
class MonitorEnter;
class MonitorExit;
class Intrinsic;
class BlockBegin;
class BlockEnd;
class Goto;
class If;
class Switch;
class TableSwitch;
class LookupSwitch;
class Return;
class Throw;
class Base;
class RoundFP;
class UnsafeOp;
class UnsafeGet;
class UnsafePut;
class UnsafeGetAndSet;
class ProfileCall;
class ProfileReturnType;
class ProfileInvoke;
class RuntimeCall;
class MemBar;
class RangeCheckPredicate;
#ifdef ASSERT
class Assert;
#endif
// A Value is a reference to the instruction creating the value typedef Instruction* Value; typedef GrowableArray<Value> Values; typedef GrowableArray<ValueStack*> ValueStackStack;
// BlockClosure is the base class for block traversal/iteration.
class BlockClosure: public CompilationResourceObj { public: virtualvoid block_do(BlockBegin* block) = 0;
};
// A simple closure class for visiting the values of an Instruction class ValueVisitor: public StackObj { public: virtualvoid visit(Value* v) = 0;
};
// Some array and list classes typedef GrowableArray<BlockBegin*> BlockBeginArray;
// InstructionVisitors provide type-based dispatch for instructions.
// For each concrete Instruction class X, a virtual function do_X is
// provided. Functionality that needs to be implemented for all classes
// (e.g., printing, code generation) is factored out into a specialised
// visitor instead of added to the Instruction classes itself.

// The following macros are used to implement instruction-specific hashing.
// By default, each instruction implements hash() and is_equal(Value), used
// for value numbering/common subexpression elimination. The default imple-
// mentation disables value numbering. Each instruction which can be value-
// numbered, should define corresponding hash() and is_equal(Value) functions
// via the macros below. The f arguments specify all the values/op codes, etc.
// that need to be identical for two instructions to be identical.
//
// Note: The default implementation of hash() returns 0 in order to indicate
//       that the instruction should not be considered for value numbering.
//       The currently used hash functions do not guarantee that never a 0
//       is produced. While this is still correct, it may be a performance
//       bug (no value numbering for that node). However, this situation is
//       so unlikely, that we are not going to handle it specially.
class Instruction: public CompilationResourceObj { private: int _id; // the unique instruction id #ifndef PRODUCT int _printable_bci; // the bci of the instruction for printing #endif int _use_count; // the number of instructions referring to this value (w/o prev/next); only roots can have use count = 0 or > 1 int _pin_state; // set of PinReason describing the reason for pinning
ValueType* _type; // the instruction value type
Instruction* _next; // the next instruction if any (NULL for BlockEnd instructions)
Instruction* _subst; // the substitution instruction if any
LIR_Opr _operand; // LIR specific information unsignedint _flags; // Flag bits
ValueStack* _state_before; // Copy of state with input operands still on stack (or NULL)
ValueStack* _exception_state; // Copy of state for exception handling
XHandlers* _exception_handlers; // Flat list of exception handlers covering this instruction
friendclass UseCountComputer;
void update_exception_state(ValueStack* state);
protected:
BlockBegin* _block; // Block that contains this instruction
// Helper class to keep track of which arguments need a null check class ArgsNonNullState { private: int _nonnull_state; // mask identifying which args are nonnull public:
ArgsNonNullState()
: _nonnull_state(AllBits) {}
// Does argument number i needs a null check? bool arg_needs_null_check(int i) const { // No data is kept for arguments starting at position 33 so // conservatively assume that they need a null check. if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { return is_set_nth_bit(_nonnull_state, i);
} returntrue;
}
// Set whether argument number i needs a null check or not void set_arg_needs_null_check(int i, bool check) { if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { if (check) {
_nonnull_state |= nth_bit(i);
} else {
_nonnull_state &= ~(nth_bit(i));
}
}
}
};
public: void* operatornew(size_t size) throw() {
Compilation* c = Compilation::current(); void* res = c->arena()->Amalloc(size); return res;
}
// Links 'next' after this instruction within the block.
// NOTE(review): this function is truncated in this source -- only the entry
// assertions are present; the code that actually stores '_next' and returns
// is missing. Recover it from the original file before building.
Instruction* set_next(Instruction* next) {
// NOTE(review): 'next' is dereferenced on the line below BEFORE the
// NULL assert that follows it -- the two asserts look out of order.
assert(next->has_printable_bci(), "_printable_bci should have been set");
assert(next != NULL, "must not be NULL");
assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
assert(next->can_be_linked(), "shouldn't link these instructions into list");
// NOTE(review): the comment line below has swallowed the declaration of
// fixup_block_pointers() due to a line-join; the brace structure from here
// to the end of this fragment is broken and must be restored upstream.
// when blocks are merged void fixup_block_pointers() {
Instruction *cur = next()->next(); // next()'s block is set in set_next while (cur && cur->_block != block()) {
cur->_block = block();
cur = cur->next();
}
}
// The following macros are used to define base (i.e., non-leaf)
// and leaf instruction classes. They define class-name related
// generic functionality in one place.
// Defines a base (non-leaf) instruction class with its as_##class_name()
// downcast helper. Fixed fused keyword: 'returnthis' -> 'return this'.
// NOTE(review): this macro appears truncated relative to the usual BASE
// definition; the dangling trailing line continuation (which swallowed the
// following comment into the macro) has been removed.
#define BASE(class_name, super_class_name)                      \
  class class_name: public super_class_name {                   \
   public:                                                      \
    virtual class_name* as_##class_name() { return this; }
// A Phi is a phi function in the sense of SSA form. It stands for // the value of a local variable at the beginning of a join block. // A Phi consists of n operands, one for every incoming branch.
LEAF(Phi, Instruction) private: int _pf_flags; // the flags of the phi function int _index; // to value on operand stack (index < 0) or to local public: // creation
Phi(ValueType* type, BlockBegin* b, int index)
: Instruction(type->base())
, _pf_flags(0)
, _index(index)
{
_block = b;
NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci())); if (type->is_illegal()) {
make_illegal();
}
}
// Invalidates phis corresponding to merges of locals of two different types // (these should never be referenced, otherwise the bytecodes are illegal) void make_illegal() {
set(cannot_simplify);
set_type(illegalType);
}
// A local is a placeholder for an incoming argument to a function call.
// (fields below had been swallowed into trailing comments by line-joins;
// restored as code)
LEAF(Local, Instruction)
 private:
  int     _java_index;   // the local index within the method to which the local belongs
  bool    _is_receiver;  // if local variable holds the receiver: "this" for non-static methods
  ciType* _declared_type;
 public:
  // creation
  Local(ciType* declared, ValueType* type, int index, bool receiver)
  : Instruction(type)
  , _java_index(index)
  , _is_receiver(receiver)
  , _declared_type(declared)
  {
    // Locals have no bytecode of their own; use -1 for printing.
    NOT_PRODUCT(set_printable_bci(-1));
  }
LEAF(Constant, Instruction)
 public:
  // creation
  Constant(ValueType* type):
    Instruction(type, NULL, /*type_is_constant*/ true)
  {
    assert(type->is_constant(), "must be a constant");
  }

  // Variant for constants that may require runtime patching; such constants
  // carry the state to deopt/patch from and are pinned.
  Constant(ValueType* type, ValueStack* state_before, bool kills_memory = false):
    Instruction(type, state_before, /*type_is_constant*/ true)
  {
    assert(state_before != NULL, "only used for constants which need patching");
    assert(type->is_constant(), "must be a constant");
    set_flag(KillsMemoryFlag, kills_memory);
    pin(); // since it's patching it needs to be pinned
  }
// NOTE(review): the fragments below appear to belong to the AccessField /
// LoadField classes (they reference _field, needs_patching(), offset()),
// not to the Constant class above -- large portions of the original file
// seem to be missing between here and the previous block. The first and
// third lines are comments that have swallowed member-function definitions
// (is_init_point() and set_explicit_null_check()) due to line-joins; they
// are NOT compiled as code here.
// Unresolved getstatic and putstatic can cause initialization. // Technically it occurs at the Constant that materializes the base // of the static fields but it's simpler to model it here. bool is_init_point() const { return is_static() && (needs_patching() || !_field->holder()->is_initialized()); }
// manipulation
// Under certain circumstances, if a previous NullCheck instruction // proved the target object non-null, we can eliminate the explicit // null check and do an implicit one, simply specifying the debug // information from the NullCheck. This field should only be consulted // if needs_null_check() is true. void set_explicit_null_check(NullCheck* check) { _explicit_null_check = check; }
// generic; cannot be eliminated if needs patching or if volatile.
HASHING3(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset(), declared_type())
};
LEAF(StoreField, AccessField) private:
Value _value;
// NOTE(review): the StoreField class is truncated here -- its constructor
// and remaining members are missing from this fragment.
LEAF(ArithmeticOp, Op2)
 public:
  // creation
  ArithmeticOp(Bytecodes::Code op, Value x, Value y, ValueStack* state_before)
  : Op2(x->type()->meet(y->type()), op, x, y, state_before)
  {
    // Ops that can trap (e.g. division) must stay at their bci.
    if (can_trap()) pin();
  }
LEAF(CompareOp, Op2)
 public:
  // creation -- result of a compare is always an int
  CompareOp(Bytecodes::Code op, Value x, Value y, ValueStack* state_before)
  : Op2(intType, op, x, y, state_before)
  {}
// This node is supposed to cast the type of another node to a more precise
// declared type.
LEAF(TypeCast, Instruction)
 private:
  ciType* _declared_type;
  Value   _obj;
 public:
  // The type of this node is the same type as the object type (and it might be constant).
  TypeCast(ciType* type, Value obj, ValueStack* state_before)
  : Instruction(obj->type(), state_before, obj->type()->is_constant()),
    _declared_type(type),
    _obj(obj) {}
BASE(NewArray, StateSplit)
 private:
  Value _length;
 public:
  // creation
  NewArray(Value length, ValueStack* state_before)
  : StateSplit(objectType, state_before)
  , _length(length)
  {
    // Do not ASSERT_VALUES since length is NULL for NewMultiArray
  }

  // accessors
  Value length() const { return _length; }
// generic virtualvoid input_values_do(ValueVisitor* f) { // NOTE: we do not call NewArray::input_values_do since "length" // is meaningless for a multi-dimensional array; passing the // zeroth element down to NewArray as its length is a bad idea // since there will be a copy in the "dims" array which doesn't // get updated, and the value must not be traversed twice. Was bug // - kbr 4/10/2001
StateSplit::input_values_do(f); for (int i = 0; i < _dims->length(); i++) f->visit(_dims->adr_at(i));
}
};
BASE(TypeCheck, StateSplit) private:
ciKlass* _klass;
Value _obj;
// NOTE(review): everything between TypeCheck's fields above and the
// Intrinsic constructor below is missing from this fragment -- including
// the rest of TypeCheck, the LEAF(Intrinsic, StateSplit) header, and the
// declarations of _id, _args and _recv. The 'public:' line that follows
// carries Intrinsic's class comment merged onto one line.
public: // preserves_state can be set to true for Intrinsics // which are guaranteed to preserve register state across any slow // cases; setting it to true does not mean that the Intrinsic can // not trap, only that if we continue execution in the same basic // block after the Intrinsic, all of the registers are intact. This // allows load elimination and common expression elimination to be // performed across the Intrinsic. The default value is false.
Intrinsic(ValueType* type,
vmIntrinsics::ID id,
Values* args, bool has_receiver,
ValueStack* state_before, bool preserves_state, bool cantrap = true)
: StateSplit(type, state_before)
, _id(id)
, _args(args)
, _recv(NULL)
{
assert(args != NULL, "args must exist");
ASSERT_VALUES
set_flag(PreservesStateFlag, preserves_state);
set_flag(CanTrapFlag, cantrap); if (has_receiver) {
_recv = argument_at(0);
}
set_needs_null_check(has_receiver);
// NOTE(review): the comment line below has swallowed the guarding
// 'if (!can_trap() && !vmIntrinsics::should_be_pinned(_id)) {' statement
// due to a line-join, which breaks the brace structure of this constructor.
// some intrinsics can't trap, so don't force them to be pinned if (!can_trap() && !vmIntrinsics::should_be_pinned(_id)) {
unpin(PinStateSplitConstructor);
}
}
// Forwards to the ArgsNonNullState helper tracking per-argument null checks.
void set_arg_needs_null_check(int i, bool check) {
_nonnull_state.set_arg_needs_null_check(i, check);
}
// NOTE(review): the comment line below has swallowed the definitions of
// can_trap() and the input_values_do() signature (with fused 'virtualbool'
// and 'virtualvoid'); they are not compiled as code here.
// generic virtualbool can_trap() const { return check_flag(CanTrapFlag); } virtualvoid input_values_do(ValueVisitor* f) {
StateSplit::input_values_do(f); for (int i = 0; i < _args->length(); i++) f->visit(_args->adr_at(i));
}
};
class LIR_List;
LEAF(BlockBegin, StateSplit) private: int _block_id; // the unique block id int _bci; // start-bci of block int _depth_first_number; // number of this block in a depth-first ordering int _linear_scan_number; // number of this block in linear-scan ordering int _dominator_depth; int _loop_depth; // the loop nesting level of this block int _loop_index; // number of the innermost loop of this block int _flags; // the flags associated with this block
// fields used by BlockListBuilder int _total_preds; // number of predecessors found by BlockListBuilder
ResourceBitMap _stores_to_locals; // bit is set when a local variable is stored in the block
// SSA specific fields: (factor out later)
BlockList _predecessors; // the predecessors of this block
BlockList _dominates; // list of blocks that are dominated by this block
BlockBegin* _dominator; // the dominator of this block // SSA specific ends
BlockEnd* _end; // the last instruction of this block
BlockList _exception_handlers; // the exception handlers potentially invoked by this block
ValueStackStack* _exception_states; // only for xhandler entries: states of all instructions that have an edge to this xhandler int _exception_handler_pco; // if this block is the start of an exception handler, // this records the PC offset in the assembly code of the // first instruction in this block
Label _label; // the label associated with this block
LIR_List* _lir; // the low level intermediate representation for this block
ResourceBitMap _live_in; // set of live LIR_Opr registers at entry to this block
ResourceBitMap _live_out; // set of live LIR_Opr registers at exit from this block
ResourceBitMap _live_gen; // set of registers used before any redefinition in this block
ResourceBitMap _live_kill; // set of registers defined in this block
ResourceBitMap _fpu_register_usage;
intArray* _fpu_stack_state; // For x86 FPU code generation with UseLinearScan int _first_lir_instruction_id; // ID of first LIR instruction in this block int _last_lir_instruction_id; // ID of last LIR instruction in this block
// exception handlers potentially invoked by this block void add_exception_handler(BlockBegin* b); bool is_exception_handler(BlockBegin* b) const { return _exception_handlers.contains(b); } int number_of_exception_handlers() const { return _exception_handlers.length(); }
BlockBegin* exception_handler_at(int i) const { return _exception_handlers.at(i); }
// states of the instructions that have an edge to this exception handler int number_of_exception_states() { assert(is_set(exception_entry_flag), "only for xhandlers"); return _exception_states == NULL ? 0 : _exception_states->length(); }
ValueStack* exception_state_at(int idx) const { assert(is_set(exception_entry_flag), "only for xhandlers"); return _exception_states->at(idx); } int add_exception_state(ValueStack* state);
// flags enum Flag {
no_flag = 0,
std_entry_flag = 1 << 0,
osr_entry_flag = 1 << 1,
exception_entry_flag = 1 << 2,
subroutine_entry_flag = 1 << 3,
backward_branch_target_flag = 1 << 4,
is_on_work_list_flag = 1 << 5,
was_visited_flag = 1 << 6,
parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 10, // set during loop-detection for LinearScan
donot_eliminate_range_checks = 1 << 11 // Should be try to eliminate range checks in this block
};
// NOTE(review): the trailing text below is not C++ -- it is German website
// boilerplate that was accidentally pasted into the file and should be
// removed. English translation of the original text, preserved here:
// "The information on this web page was carefully compiled to the best of
// our knowledge. However, neither completeness, nor correctness, nor quality
// of the provided information is guaranteed. Remark: the colored syntax
// highlighting is still experimental."