/* * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Node objects form a directed graph of LIR_Opr // Edges between Nodes represent moves from one Node to its destinations class ResolveNode: public CompilationResourceObj { private:
LIR_Opr _operand; // the source or destinaton
NodeList _destinations; // for the operand bool _assigned; // Value assigned to this Node? bool _visited; // Node already visited? bool _start_node; // Start node already visited?
// This is shared state to be used by the PhiResolver so the operand // arrays don't have to be reallocated for each resolution. class PhiResolverState: public CompilationResourceObj { friendclass PhiResolver;
private:
NodeList _virtual_operands; // Nodes where the operand is a virtual register
NodeList _other_operands; // Nodes where the operand is not a virtual register
NodeList _vreg_table; // Mapping from virtual register to Node
public:
PhiResolverState() {}
void reset();
};
// class used to move value of phi operand to phi function class PhiResolver: public CompilationResourceObj { private:
LIRGenerator* _gen;
PhiResolverState& _state; // temporary state cached by LIRGenerator
// only the classes below belong in the same file class LIRGenerator: public InstructionVisitor, public BlockClosure { // LIRGenerator should never get instatiated on the heap. private: void* operatornew(size_t size) throw(); void* operatornew[](size_t size) throw(); voidoperatordelete(void* p) { ShouldNotReachHere(); } voidoperatordelete[](void* p) { ShouldNotReachHere(); }
Compilation* _compilation;
ciMethod* _method; // method that we are compiling
PhiResolverState _resolver_state;
BlockBegin* _block; int _virtual_register_number;
Values _instruction_for_operand;
BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis
LIR_List* _lir;
private: // a simple cache of constants used within a block
GrowableArray<LIR_Const*> _constants;   // constants seen in the current block
LIR_OprList _reg_for_constants;         // register operand recorded for each cached constant
Values _unpinned_constants;
// get a constant into a register and keep track of what register was used
LIR_Opr load_constant(Constant* x);
LIR_Opr load_constant(LIR_Const* constant);
// Given an immediate value, return an operand usable in logical ops.
LIR_Opr load_immediate(jlong x, BasicType type);
// Attach opr as the result operand of instruction x. The operand must be
// valid, x must not already have an operand, and a register result must be
// a virtual register. Virtual-register results are also recorded in
// _instruction_for_operand so the instruction can be looked up by vreg number.
void set_result(Value x, LIR_Opr opr) {
  assert(opr->is_valid(), "must set to valid value");
  assert(x->operand()->is_illegal(), "operand should never change");
  assert(!opr->is_register() || opr->is_virtual(), "should never set result to a physical register");
  x->set_operand(opr);
  assert(opr == x->operand(), "must be");
  if (opr->is_virtual()) {
    _instruction_for_operand.at_put_grow(opr->vreg_number(), x, NULL);
  }
}

// Mark instruction x as producing no result; x must have no uses.
void set_no_result(Value x) {
  assert(!x->has_uses(), "can't have use");
  x->clear_operand();
}
// These need to guarantee JMM volatile semantics are preserved on each platform
// and require one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
// volatile field operations are never patchable because a klass // must be loaded to know it's volatile which means that the offset // it always known as well. void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info); void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
// this loads the length and compares against the index void array_range_check (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info);
// returns a LIR_Address to address an array location. May also
// emit some code as part of address calculation.
// NOTE(review): the original comment mentioned a needs_card_mark parameter
// that no longer appears in these signatures — presumably stale text from an
// older revision; confirm against the file's history.
LIR_Address* generate_address(LIR_Opr base,
LIR_Opr index, int shift, int disp,
BasicType type);
// convenience overload: no index register and no shift
LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) { return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
}
// address an array element: array_opr indexed by index_opr, element type 'type'
LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type);
// the helper for generate_address void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
// machine preferences and characteristics bool can_inline_as_constant(Value i S390_ONLY(COMMA int bits = 20)) const; bool can_inline_as_constant(LIR_Const* c) const; bool can_store_as_constant(Value i, BasicType type) const;
LIR_Opr safepoint_poll_register();
void profile_branch(If* if_instr, If::Condition cond); void increment_event_counter_impl(CodeEmitInfo* info,
ciMethod *method, LIR_Opr step, int frequency, int bci, bool backedge, bool notify); void increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge); void increment_invocation_counter(CodeEmitInfo *info) { if (compilation()->is_profiling()) {
increment_event_counter(info, LIR_OprFact::intConst(InvocationCounter::count_increment), InvocationEntryBci, false);
}
} void increment_backedge_counter(CodeEmitInfo* info, int bci) { if (compilation()->is_profiling()) {
increment_event_counter(info, LIR_OprFact::intConst(InvocationCounter::count_increment), bci, true);
}
} void increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci); void increment_backedge_counter(CodeEmitInfo* info, LIR_Opr step, int bci) { if (compilation()->is_profiling()) {
increment_event_counter(info, step, bci, true);
}
}
// builds a CodeEmitInfo for instruction x from the given ValueStack
// (ignore_xhandler presumably suppresses exception-handler state — confirm at call sites)
CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
CodeEmitInfo* state_for(Instruction* x);
// allocates a virtual register for this instruction if
// one isn't already allocated. Only for Phi and Local.
LIR_Opr operand_for_instruction(Instruction *x);
// current value of the virtual-register counter
int max_virtual_register_number() const { return _virtual_register_number; }
// BlockClosure callback: generates LIR for a single basic block
void block_do(BlockBegin* block);
// Flags that can be set on vregs
enum VregFlag {
    must_start_in_memory = 0  // needs to be assigned a memory location at beginning, but may then be loaded in a register
  , callee_saved         = 1  // must be in a callee saved register
  , byte_reg             = 2  // must be in a byte register
  , num_vreg_flags
};

// for virtual registers, maps them back to Phi's or Local's
Instruction* instruction_for_opr(LIR_Opr opr);
Instruction* instruction_for_vreg(int reg_num);
void load_item(); void load_byte_item(); void load_nonconstant(S390_ONLY(int bits = 20)); // load any values which can't be expressed as part of a single store instruction void load_for_store(BasicType store_type); void load_item_force(LIR_Opr reg);
// NOTE(review): The text below is extraneous German boilerplate from a
// code-listing website and is not part of this header; it should be deleted.
// Translation: "The information on this web page was carefully compiled to
// the best of our knowledge. However, neither completeness, nor correctness,
// nor quality of the provided information is guaranteed. Note: the colored
// syntax highlighting is still experimental."