/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciSymbols.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
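// BlockListBuilder determines the basic block structure of a method:
// set_leaders() iterates over the bytecodes once, creating a BlockBegin at
// every block leader and recording successor edges per bci (no BlockEnd
// instructions exist yet), and mark_loops() then traverses the resulting
// CFG to number the blocks and to mark loop headers.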
class BlockListBuilder {
private:
Compilation* _compilation;
IRScope* _scope;
BlockList _blocks; // internal list of all blocks
BlockList* _bci2block; // mapping from bci to blocks for GraphBuilder
GrowableArray<BlockList> _bci2block_successors; // Mapping from bci to the successors of its block, used while we don't have a BlockEnd yet
// fields used by mark_loops
ResourceBitMap _active; // for iteration of control flow graph
ResourceBitMap _visited; // for iteration of control flow graph
GrowableArray<ResourceBitMap> _loop_map; // caches the set of loops that contain a block
int _next_loop_index; // next free loop number
int _next_block_number; // for reverse postorder numbering of blocks
int _block_id_start;
int bit_number(int block_id) const { return block_id - _block_id_start; }
// accessors
Compilation* compilation() const { return _compilation; }
IRScope* scope() const { return _scope; }
ciMethod* method() const { return scope()->method(); }
XHandlers* xhandlers() const { return scope()->xhandlers(); }
// unified bailout support
void bailout(const char* msg) const { compilation()->bailout(msg); }
bool bailed_out() const { return compilation()->bailed_out(); }
// helper functions
BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
void handle_exceptions(BlockBegin* current, int cur_bci);
void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
void store_one(BlockBegin* current, int local);
void store_two(BlockBegin* current, int local);
void set_entries(int osr_bci);
void set_leaders();
void make_loop_header(BlockBegin* block);
void mark_loops();
BitMap& mark_loops(BlockBegin* b, bool in_subroutine);
// debugging
#ifndef PRODUCT
void print();
#endif
int number_of_successors(BlockBegin* block);
BlockBegin* successor_at(BlockBegin* block, int i);
void add_successor(BlockBegin* block, BlockBegin* sux);
bool is_successor(BlockBegin* block, BlockBegin* sux);
public:
// creation
BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);
// accessors for GraphBuilder
BlockList* bci2block() const { return _bci2block; }
};
// Implementation of BlockListBuilder
BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
: _compilation(compilation)
, _scope(scope)
, _blocks(16)
, _bci2block(new BlockList(scope->method()->code_size(), NULL))
, _bci2block_successors(scope->method()->code_size())
, _active() // size not known yet
, _visited() // size not known yet
, _loop_map() // size not known yet
, _next_loop_index(0)
, _next_block_number(0)
, _block_id_start(0)
{
set_entries(osr_bci);
set_leaders();
CHECK_BAILOUT();
mark_loops();
NOT_PRODUCT(if (PrintInitialBlockList) print());
// At this point, _bci2block still contains blocks with _end == NULL; their successors are recorded only in _bci2block_successors.
#ifndef PRODUCT
if (PrintCFGToFile) {
stringStream title;
title.print("BlockListBuilder ");
scope->method()->print_name(&title);
CFGPrinter::print_cfg(_bci2block, title.freeze(), false, false);
}
#endif
}
void BlockListBuilder::set_entries(int osr_bci) {
// generate start blocks
BlockBegin* std_entry = make_block_at(0, NULL);
if (scope()->caller() == NULL) {
std_entry->set(BlockBegin::std_entry_flag);
}
if (osr_bci != -1) {
BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
osr_entry->set(BlockBegin::osr_entry_flag);
}
// generate exception entry blocks
XHandlers* list = xhandlers();
const int n = list->length();
for (int i = 0; i < n; i++) {
XHandler* h = list->handler_at(i);
BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
entry->set(BlockBegin::exception_entry_flag);
h->set_entry_block(entry);
}
}
BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");
BlockBegin* block = _bci2block->at(cur_bci);
if (block == NULL) {
block = new BlockBegin(cur_bci);
block->init_stores_to_locals(method()->max_locals());
_bci2block->at_put(cur_bci, block);
_bci2block_successors.at_put_grow(cur_bci, BlockList());
_blocks.append(block);
assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
}
if (predecessor != NULL) {
if (block->is_set(BlockBegin::exception_entry_flag)) {
BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
}
add_successor(predecessor, block);
block->increment_total_preds();
}
return block;
}
inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
store_one(current, local);
store_one(current, local + 1);
}
void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
// Draws edges from a block to its exception handlers
XHandlers* list = xhandlers();
const int n = list->length();
for (int i = 0; i < n; i++) {
XHandler* h = list->handler_at(i);
if (h->covers(cur_bci)) {
BlockBegin* entry = h->entry_block();
assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");
// add each exception handler only once
if (!is_successor(current, entry)) {
add_successor(current, entry);
entry->increment_total_preds();
}
// stop when reaching catchall
if (h->catch_type() == 0) break;
}
}
}
void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
if (next_bci < method()->code_size()) {
// start a new block after jsr-bytecode and link this block into cfg
make_block_at(next_bci, current);
}
// start a new block at the subroutine entry and mark it with a special flag
BlockBegin* sr_block = make_block_at(sr_bci, current);
if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
sr_block->set(BlockBegin::subroutine_entry_flag);
}
}
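// Scan the bytecodes once: create a block at every bci that starts a block,
// record stores to locals (for selective phi creation) and add successor
// edges for all control-flow bytecodes.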
void BlockListBuilder::set_leaders() {
bool has_xhandlers = xhandlers()->has_handlers();
BlockBegin* current = NULL;
// The information about which bcis start a new block simplifies the analysis.
// Without it, backward branches could jump to a bci where no block was created
// during bytecode iteration. This would require the creation of a new block at the
// branch target and a modification of the successor lists.
const BitMap& bci_block_start = method()->bci_block_start();
int end_bci = method()->code_size();
ciBytecodeStream s(method());
while (s.next() != ciBytecodeStream::EOBC()) {
int cur_bci = s.cur_bci();
if (bci_block_start.at(cur_bci)) {
current = make_block_at(cur_bci, current);
}
assert(current != NULL, "must have current block");
if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
handle_exceptions(current, cur_bci);
}
switch (s.cur_bc()) {
// track stores to local variables for selective creation of phi functions
case Bytecodes::_iinc: store_one(current, s.get_index()); break;
case Bytecodes::_istore: store_one(current, s.get_index()); break;
case Bytecodes::_lstore: store_two(current, s.get_index()); break;
case Bytecodes::_fstore: store_one(current, s.get_index()); break;
case Bytecodes::_dstore: store_two(current, s.get_index()); break;
case Bytecodes::_astore: store_one(current, s.get_index()); break;
case Bytecodes::_istore_0: store_one(current, 0); break;
case Bytecodes::_istore_1: store_one(current, 1); break;
case Bytecodes::_istore_2: store_one(current, 2); break;
case Bytecodes::_istore_3: store_one(current, 3); break;
case Bytecodes::_lstore_0: store_two(current, 0); break;
case Bytecodes::_lstore_1: store_two(current, 1); break;
case Bytecodes::_lstore_2: store_two(current, 2); break;
case Bytecodes::_lstore_3: store_two(current, 3); break;
case Bytecodes::_fstore_0: store_one(current, 0); break;
case Bytecodes::_fstore_1: store_one(current, 1); break;
case Bytecodes::_fstore_2: store_one(current, 2); break;
case Bytecodes::_fstore_3: store_one(current, 3); break;
case Bytecodes::_dstore_0: store_two(current, 0); break;
case Bytecodes::_dstore_1: store_two(current, 1); break;
case Bytecodes::_dstore_2: store_two(current, 2); break;
case Bytecodes::_dstore_3: store_two(current, 3); break;
case Bytecodes::_astore_0: store_one(current, 0); break;
case Bytecodes::_astore_1: store_one(current, 1); break;
case Bytecodes::_astore_2: store_one(current, 2); break;
case Bytecodes::_astore_3: store_one(current, 3); break;
// track bytecodes that affect the control flow
case Bytecodes::_athrow: // fall through
case Bytecodes::_ret: // fall through
case Bytecodes::_ireturn: // fall through
case Bytecodes::_lreturn: // fall through
case Bytecodes::_freturn: // fall through
case Bytecodes::_dreturn: // fall through
case Bytecodes::_areturn: // fall through
case Bytecodes::_return:
current = NULL;
break;
case Bytecodes::_ifeq: // fall through
case Bytecodes::_ifne: // fall through
case Bytecodes::_iflt: // fall through
case Bytecodes::_ifge: // fall through
case Bytecodes::_ifgt: // fall through
case Bytecodes::_ifle: // fall through
case Bytecodes::_if_icmpeq: // fall through
case Bytecodes::_if_icmpne: // fall through
case Bytecodes::_if_icmplt: // fall through
case Bytecodes::_if_icmpge: // fall through
case Bytecodes::_if_icmpgt: // fall through
case Bytecodes::_if_icmple: // fall through
case Bytecodes::_if_acmpeq: // fall through
case Bytecodes::_if_acmpne: // fall through
case Bytecodes::_ifnull: // fall through
case Bytecodes::_ifnonnull:
if (s.next_bci() < end_bci) {
make_block_at(s.next_bci(), current);
}
make_block_at(s.get_dest(), current);
current = NULL;
break;
case Bytecodes::_goto:
make_block_at(s.get_dest(), current);
current = NULL;
break;
case Bytecodes::_goto_w:
make_block_at(s.get_far_dest(), current);
current = NULL;
break;
case Bytecodes::_jsr:
handle_jsr(current, s.get_dest(), s.next_bci());
current = NULL;
break;
case Bytecodes::_jsr_w:
handle_jsr(current, s.get_far_dest(), s.next_bci());
current = NULL;
break;
case Bytecodes::_tableswitch: {
// set block for each case
Bytecode_tableswitch sw(&s);
int l = sw.length();
for (int i = 0; i < l; i++) {
make_block_at(cur_bci + sw.dest_offset_at(i), current);
}
make_block_at(cur_bci + sw.default_offset(), current);
current = NULL;
break;
}
case Bytecodes::_lookupswitch: {
// set block for each case
Bytecode_lookupswitch sw(&s);
int l = sw.number_of_pairs();
for (int i = 0; i < l; i++) {
make_block_at(cur_bci + sw.pair_at(i).offset(), current);
}
make_block_at(cur_bci + sw.default_offset(), current);
current = NULL;
break;
}
default:
break;
}
}
}
void BlockListBuilder::mark_loops() {
ResourceMark rm;
const int number_of_blocks = _blocks.length();
_active.initialize(number_of_blocks);
_visited.initialize(number_of_blocks);
_loop_map = GrowableArray<ResourceBitMap>(number_of_blocks, number_of_blocks, ResourceBitMap());
for (int i = 0; i < number_of_blocks; i++) {
_loop_map.at(i).initialize(number_of_blocks);
}
_next_loop_index = 0;
_next_block_number = _blocks.length();
// The loop detection algorithm works as follows:
// - We maintain the _loop_map, where for each block we have a bitmap indicating which loops contain this block.
// - The CFG is recursively traversed (depth-first) and if we detect a loop, we assign the loop a unique number that is stored
// in the bitmap associated with the loop header block. Until we return back through that loop header the bitmap contains
// only a single bit corresponding to the loop number.
// - The bit is then propagated for all the blocks in the loop after we exit them (post-order). There could be multiple bits
// of course in case of nested loops.
// - When we exit the loop header we remove that single bit and assign the real loop state for it.
// - The tricky part is the detection of irreducible loops. In the algorithm above, the loop state bits
// are propagated to the predecessors. If we encounter an irreducible loop (a loop with multiple heads), we see
// a node with some loop bit set that then propagates back and is never cleared, because we never
// return through the original loop header. Therefore, if there are any irreducible loops, the bits in the states
// for these loops propagate back to the root.
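// Consequently, any loop bits still set in the state returned for the start block indicate irreducible loops.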
BlockBegin* start = _bci2block->at(0);
_block_id_start = start->block_id();
BitMap& loop_state = mark_loops(start, false);
if (!loop_state.is_empty()) {
compilation()->set_has_irreducible_loops(true);
}
assert(_next_block_number >= 0, "invalid block numbers");
// Remove dangling Resource pointers before the ResourceMark goes out-of-scope.
_active.resize(0);
_visited.resize(0);
_loop_map.clear();
}
void BlockListBuilder::make_loop_header(BlockBegin* block) {
int block_id = block->block_id();
int block_bit = bit_number(block_id);
if (block->is_set(BlockBegin::exception_entry_flag)) {
// exception edges may look like loops but don't mark them as such
// since it screws up block ordering.
return;
}
if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
block->set(BlockBegin::parser_loop_header_flag);
assert(_loop_map.at(block_bit).is_empty(), "must not be set yet");
assert(0 <= _next_loop_index && _next_loop_index < _loop_map.length(), "_next_loop_index is too large");
_loop_map.at(block_bit).set_bit(_next_loop_index++);
} else {
// block already marked as loop header
assert(_loop_map.at(block_bit).count_one_bits() == 1, "exactly one bit must be set");
}
}
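// Depth-first traversal of the CFG: assigns reverse-postorder numbers to the
// blocks, propagates loop membership bits upwards and returns the loop state
// (the set of enclosing loops) computed for 'block'.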
BitMap& BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
int block_id = block->block_id();
int block_bit = bit_number(block_id);
if (_visited.at(block_bit)) {
if (_active.at(block_bit)) {
// reached block via backward branch
make_loop_header(block);
}
// return cached loop information for this block
return _loop_map.at(block_bit);
}
if (block->is_set(BlockBegin::subroutine_entry_flag)) {
in_subroutine = true;
}
// set active and visited bits before successors are processed
_visited.set_bit(block_bit);
_active.set_bit(block_bit);
ResourceMark rm;
ResourceBitMap loop_state(_loop_map.length());
for (int i = number_of_successors(block) - 1; i >= 0; i--) {
BlockBegin* sux = successor_at(block, i);
// recursively process all successors
loop_state.set_union(mark_loops(sux, in_subroutine));
}
// clear active-bit after all successors are processed
_active.clear_bit(block_bit);
// reverse-post-order numbering of all blocks
block->set_depth_first_number(_next_block_number);
_next_block_number--;
if (!loop_state.is_empty() || in_subroutine) {
// block is contained at least in one loop, so phi functions are necessary
// phi functions are also necessary for all locals stored in a subroutine
scope()->requires_phi_function().set_union(block->stores_to_locals());
}
if (block->is_set(BlockBegin::parser_loop_header_flag)) {
BitMap& header_loop_state = _loop_map.at(block_bit);
assert(header_loop_state.count_one_bits() == 1, "exactly one bit must be set");
// remove the bit with the loop number from the state (the header itself is outside of the loop)
loop_state.set_difference(header_loop_state);
}
// cache and return loop information for this block
_loop_map.at(block_bit).set_from(loop_state);
return _loop_map.at(block_bit);
}
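// Successor edges are kept per bci in _bci2block_successors until the blocks
// get their BlockEnd instructions; these helpers access that side table.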
inline int BlockListBuilder::number_of_successors(BlockBegin* block)
{
assert(_bci2block_successors.length() > block->bci(), "sux must exist");
return _bci2block_successors.at(block->bci()).length();
}
inline BlockBegin* BlockListBuilder::successor_at(BlockBegin* block, int i)
{
assert(_bci2block_successors.length() > block->bci(), "sux must exist");
return _bci2block_successors.at(block->bci()).at(i);
}
inline void BlockListBuilder::add_successor(BlockBegin* block, BlockBegin* sux)
{
assert(_bci2block_successors.length() > block->bci(), "sux must exist");
_bci2block_successors.at(block->bci()).append(sux);
}
inline bool BlockListBuilder::is_successor(BlockBegin* block, BlockBegin* sux) {
assert(_bci2block_successors.length() > block->bci(), "sux must exist");
return _bci2block_successors.at(block->bci()).contains(sux);
}
#ifndef PRODUCT
int compare_depth_first(BlockBegin** a, BlockBegin** b) {
return (*a)->depth_first_number() - (*b)->depth_first_number();
}
void BlockListBuilder::print() {
tty->print("----- initial block list of BlockListBuilder for method ");
method()->print_short_name();
tty->cr();
// better readability if blocks are sorted in processing order
_blocks.sort(compare_depth_first);
for (int i = 0; i < _blocks.length(); i++) {
BlockBegin* cur = _blocks.at(i);
tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());
tty->print(cur->is_set(BlockBegin::std_entry_flag) ? " std" : " ");
tty->print(cur->is_set(BlockBegin::osr_entry_flag) ? " osr" : " ");
tty->print(cur->is_set(BlockBegin::exception_entry_flag) ? " ex" : " ");
tty->print(cur->is_set(BlockBegin::subroutine_entry_flag) ? " sr" : " ");
tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh" : " ");
if (number_of_successors(cur) > 0) {
tty->print(" sux: ");
for (int j = 0; j < number_of_successors(cur); j++) {
BlockBegin* sux = successor_at(cur, j);
tty->print("B%d ", sux->block_id());
}
}
tty->cr();
}
}
#endif
// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
private:
GrowableArray<Value> _values;
public:
FieldBuffer() {}
void kill() {
_values.trunc_to(0);
}
Value at(ciField* field) {
assert(field->holder()->is_loaded(), "must be a loaded field");
int offset = field->offset();
if (offset < _values.length()) {
return _values.at(offset);
} else {
return NULL;
}
}
void at_put(ciField* field, Value value) {
assert(field->holder()->is_loaded(), "must be a loaded field");
int offset = field->offset();
_values.at_put_grow(offset, value, NULL);
}
};
// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces. The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V. This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V. Loads of O.F can
// simply use V. Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields. Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant. Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.
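// Illustrative example of the tracking described above (Java pseudocode):
//   A a = new A();  // 'a' is tracked in _newobjects/_fields
//   a.f = 0;        // store of the default value to a fresh object -> eliminated
//   b.g = a;        // 'a' escapes -> from now on treated like generic memory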
class MemoryBuffer: public CompilationResourceObj {
private:
FieldBuffer _values;
GrowableArray<Value> _objects;
GrowableArray<Value> _newobjects;
GrowableArray<FieldBuffer*> _fields;
public:
MemoryBuffer() {}
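// Record a field store. Returns NULL if the store is provably redundant
// (the default value written to a field of a freshly allocated object that
// has not been stored to yet), otherwise returns the original StoreField.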
StoreField* store(StoreField* st) {
if (!EliminateFieldAccess) {
return st;
}
Value object = st->obj();
Value value = st->value();
ciField* field = st->field();
if (field->holder()->is_loaded()) {
int offset = field->offset();
int index = _newobjects.find(object);
if (index != -1) {
// newly allocated object with no other stores performed on this field
FieldBuffer* buf = _fields.at(index);
if (buf->at(field) == NULL && is_default_value(value)) {
#ifndef PRODUCT
if (PrintIRDuringConstruction && Verbose) {
tty->print_cr("Eliminated store for object %d:", index);
st->print_line();
}
#endif
return NULL;
} else {
buf->at_put(field, value);
}
} else {
_objects.at_put_grow(offset, object, NULL);
_values.at_put(field, value);
}
store_value(value);
} else {
// if we held onto field names we could alias based on names but
// we don't know what's being stored to so kill it all.
kill();
}
return st;
}
// return true if this value corresponds to the default value of a field.
bool is_default_value(Value value) {
Constant* con = value->as_Constant();
if (con) {
switch (con->type()->tag()) {
case intTag: return con->type()->as_IntConstant()->value() == 0;
case longTag: return con->type()->as_LongConstant()->value() == 0;
case floatTag: return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
case objectTag: return con->type() == objectNull;
default: ShouldNotReachHere();
}
}
return false;
}
// return either the actual value of a load or the load itself
Value load(LoadField* load) {
if (!EliminateFieldAccess) {
return load;
}
if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) {
#ifdef IA32
if (UseSSE < 2) {
// can't skip load since value might get rounded as a side effect
return load;
}
#else
Unimplemented();
#endif // IA32
}
ciField* field = load->field();
Value object = load->obj();
if (field->holder()->is_loaded() && !field->is_volatile()) {
int offset = field->offset();
Value result = NULL;
int index = _newobjects.find(object);
if (index != -1) {
result = _fields.at(index)->at(field);
} else if (_objects.at_grow(offset, NULL) == object) {
result = _values.at(field);
}
if (result != NULL) {
#ifndef PRODUCT
if (PrintIRDuringConstruction && Verbose) {
tty->print_cr("Eliminated load: ");
load->print_line();
}
#endif
assert(result->type()->tag() == load->type()->tag(), "wrong types");
return result;
}
}
return load;
}
// Record this newly allocated object
void new_instance(NewInstance* object) {
int index = _newobjects.length();
_newobjects.append(object);
if (_fields.at_grow(index, NULL) == NULL) {
_fields.at_put(index, new FieldBuffer());
} else {
_fields.at(index)->kill();
}
}
void store_value(Value value) {
int index = _newobjects.find(value);
if (index != -1) {
// stored a newly allocated object into another object.
// Assume we've lost track of it as a separate slice of memory.
// We could do better by keeping track of whether individual
// fields could alias each other.
_newobjects.remove_at(index);
// pull out the field info and store it at the end of the
// field info list so it can be reused later.
_fields.append(_fields.at(index));
_fields.remove_at(index);
}
}
void kill() {
_newobjects.trunc_to(0);
_objects.trunc_to(0);
_values.kill();
}
};
// Implementation of GraphBuilder's ScopeData
GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
: _parent(parent)
, _bci2block(NULL)
, _scope(NULL)
, _has_handler(false)
, _stream(NULL)
, _work_list(NULL)
, _caller_stack_size(-1)
, _continuation(NULL)
, _parsing_jsr(false)
, _jsr_xhandlers(NULL)
, _num_returns(0)
, _cleanup_block(NULL)
, _cleanup_return_prev(NULL)
, _cleanup_state(NULL)
, _ignore_return(false)
{
if (parent != NULL) {
_max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
} else {
_max_inline_size = C1MaxInlineSize;
}
if (_max_inline_size < C1MaxTrivialSize) {
_max_inline_size = C1MaxTrivialSize;
}
}
void GraphBuilder::kill_all() {
if (UseLocalValueNumbering) {
vmap()->kill_all();
}
_memory->kill();
}
BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
if (parsing_jsr()) {
// It is necessary to clone all blocks associated with a
// subroutine, including those for exception handlers in the scope
// of the method containing the jsr (because those exception
// handlers may contain ret instructions in some cases).
BlockBegin* block = bci2block()->at(bci);
if (block != NULL && block == parent()->bci2block()->at(bci)) {
BlockBegin* new_block = new BlockBegin(block->bci());
if (PrintInitialBlockList) {
tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
block->block_id(), block->bci(), new_block->block_id());
}
// copy data from the cloned block
new_block->set_depth_first_number(block->depth_first_number());
if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
// Preserve certain flags for assertion checking
if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
if (block->is_set(BlockBegin::exception_entry_flag)) new_block->set(BlockBegin::exception_entry_flag);
// copy was_visited_flag to allow early detection of bailouts:
// if a block that is used in a jsr has already been visited before,
// it is shared between the normal control flow and a subroutine.
// BlockBegin::try_merge returns false when the flag is set, which leads
// to a compilation bailout.
if (block->is_set(BlockBegin::was_visited_flag)) new_block->set(BlockBegin::was_visited_flag);
bci2block()->at_put(bci, new_block);
block = new_block;
}
return block;
} else {
return bci2block()->at(bci);
}
}
XHandlers* GraphBuilder::ScopeData::xhandlers() const {
if (_jsr_xhandlers == NULL) {
assert(!parsing_jsr(), "");
return scope()->xhandlers();
}
assert(parsing_jsr(), "");
return _jsr_xhandlers;
}
void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
_scope = scope;
bool parent_has_handler = false;
if (parent() != NULL) {
parent_has_handler = parent()->has_handler();
}
_has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}
void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
Instruction* return_prev,
ValueStack* return_state) {
_cleanup_block = block;
_cleanup_return_prev = return_prev;
_cleanup_state = return_state;
}
void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
if (_work_list == NULL) {
_work_list = new BlockList();
}
if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
// Do not start parsing the continuation block while in a
// sub-scope
if (parsing_jsr()) {
if (block == jsr_continuation()) {
return;
}
} else {
if (block == continuation()) {
return;
}
}
block->set(BlockBegin::is_on_work_list_flag);
_work_list->push(block);
sort_top_into_worklist(_work_list, block);
}
}
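// Insertion-sort step: after 'top' has been pushed, move it backwards until
// the work list is again ordered by descending depth_first_number, so that
// pop() always returns the block with the smallest number.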
void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
assert(worklist->top() == top, "");
// sort block descending into work list
const int dfn = top->depth_first_number();
assert(dfn != -1, "unknown depth first number");
int i = worklist->length()-2;
while (i >= 0) {
BlockBegin* b = worklist->at(i);
if (b->depth_first_number() < dfn) {
worklist->at_put(i+1, b);
} else {
break;
}
i --;
}
if (i >= -1) worklist->at_put(i + 1, top);
}
BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
if (is_work_list_empty()) {
return NULL;
}
return _work_list->pop();
}
bool GraphBuilder::ScopeData::is_work_list_empty() const {
return (_work_list == NULL || _work_list->length() == 0);
}
void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
assert(parsing_jsr(), "");
// clone all the exception handlers from the scope
XHandlers* handlers = new XHandlers(scope()->xhandlers());
const int n = handlers->length();
for (int i = 0; i < n; i++) {
// The XHandlers need to be adjusted to dispatch to the cloned
// handler block instead of the default one but the synthetic
// unlocker needs to be handled specially. The synthetic unlocker
// should be left alone since there can be only one and all code
// should dispatch to the same one.
XHandler* h = handlers->handler_at(i);
assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
h->set_entry_block(block_at(h->handler_bci()));
}
_jsr_xhandlers = handlers;
}
int GraphBuilder::ScopeData::num_returns() {
if (parsing_jsr()) {
return parent()->num_returns();
}
return _num_returns;
}
void GraphBuilder::ScopeData::incr_num_returns() {
if (parsing_jsr()) {
parent()->incr_num_returns();
} else {
++_num_returns;
}
}
// Implementation of GraphBuilder
#define INLINE_BAILOUT(msg) { inline_bailout(msg); return false; }
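// Push the constant-pool entry referenced by the current ldc/ldc_w/ldc2_w
// bytecode onto the operand stack, emitting a patching Constant when the
// entry is not yet resolved.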
void GraphBuilder::load_constant() {
ciConstant con = stream()->get_constant();
if (con.is_valid()) {
ValueType* t = illegalType;
ValueStack* patch_state = NULL;
switch (con.basic_type()) {
case T_BOOLEAN: t = new IntConstant (con.as_boolean()); break;
case T_BYTE : t = new IntConstant (con.as_byte ()); break;
case T_CHAR : t = new IntConstant (con.as_char ()); break;
case T_SHORT : t = new IntConstant (con.as_short ()); break;
case T_INT : t = new IntConstant (con.as_int ()); break;
case T_LONG : t = new LongConstant (con.as_long ()); break;
case T_FLOAT : t = new FloatConstant (con.as_float ()); break;
case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break;
case T_ARRAY : // fall-through
case T_OBJECT : {
ciObject* obj = con.as_object();
if (!obj->is_loaded() || (PatchALot && !stream()->is_string_constant())) {
// A Class, MethodType, MethodHandle, Dynamic, or String.
patch_state = copy_state_before();
t = new ObjectConstant(obj);
} else {
// Might be a Class, MethodType, MethodHandle, or Dynamic constant
// result, which might turn out to be an array.
if (obj->is_null_object()) {
t = objectNull;
} else if (obj->is_array()) {
t = new ArrayConstant(obj->as_array());
} else {
t = new InstanceConstant(obj->as_instance());
}
}
break;
}
default: ShouldNotReachHere();
}
Value x;
if (patch_state != NULL) {
// Arbitrary memory effects from running BSM or class loading (using custom loader) during linkage.
bool kills_memory = stream()->is_dynamic_constant() ||
(!stream()->is_string_constant() && !method()->holder()->has_trusted_loader());
x = new Constant(t, patch_state, kills_memory);
} else {
x = new Constant(t);
}
// Unbox the value at runtime, if needed.
// ConstantDynamic entry can be of a primitive type, but it is cached in boxed form.
if (patch_state != NULL) {
int index = stream()->get_constant_pool_index();
BasicType type = stream()->get_basic_type_for_constant_at(index);
if (is_java_primitive(type)) {
ciInstanceKlass* box_klass = ciEnv::current()->get_box_klass_for_primitive_type(type);
assert(box_klass->is_loaded(), "sanity");
int offset = java_lang_boxing_object::value_offset(type);
ciField* value_field = box_klass->get_field_by_offset(offset, false /*is_static*/);
x = new LoadField(append(x), offset, value_field, false /*is_static*/, patch_state, false /*needs_patching*/);
t = as_ValueType(type);
} else {
assert(is_reference_type(type), "not a reference: %s", type2name(type));
}
}
push(t, append(x));
} else {
BAILOUT("could not resolve a constant");
}
}
void GraphBuilder::load_local(ValueType* type, int index) {
Value x = state()->local_at(index);
assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
push(type, x);
}
void GraphBuilder::store_local(ValueType* type, int index) {
Value x = pop(type);
store_local(state(), x, index);
}
void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
if (parsing_jsr()) {
// We need to do additional tracking of the location of the return
// address for jsrs since we don't handle arbitrary jsr/ret
// constructs. Here we are figuring out in which circumstances we
// need to bail out.
if (x->type()->is_address()) {
scope_data()->set_jsr_return_address_local(index);
// Also check parent jsrs (if any) at this time to see whether
// they are using this local. We don't handle skipping over a
// ret.
for (ScopeData* cur_scope_data = scope_data()->parent();
cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
cur_scope_data = cur_scope_data->parent()) {
if (cur_scope_data->jsr_return_address_local() == index) {
BAILOUT("subroutine overwrites return address from previous subroutine");
}
}
} else if (index == scope_data()->jsr_return_address_local()) {
scope_data()->set_jsr_return_address_local(-1);
}
}
state->store_local(index, round_fp(x));
}
void GraphBuilder::load_indexed(BasicType type) {
// In case of in-block code motion during range check elimination, the state before the access is needed
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value index = ipop();
Value array = apop();
Value length = NULL;
if (CSEArrayLength ||
(array->as_Constant() != NULL) ||
(array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
(array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
(array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
length = append(new ArrayLength(array, state_before));
}
push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
}
void GraphBuilder::store_indexed(BasicType type) {
// In case of in-block code motion during range check elimination, the state before the access is needed
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value value = pop(as_ValueType(type));
Value index = ipop();
Value array = apop();
Value length = NULL;
if (CSEArrayLength ||
(array->as_Constant() != NULL) ||
(array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
(array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant()) ||
(array->as_NewMultiArray() && array->as_NewMultiArray()->dims()->at(0)->type()->is_constant())) {
length = append(new ArrayLength(array, state_before));
}
ciType* array_type = array->declared_type();
bool check_boolean = false;
if (array_type != NULL) {
if (array_type->is_loaded() &&
array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
assert(type == T_BYTE, "boolean store uses bastore");
Value mask = append(new Constant(new IntConstant(1)));
value = append(new LogicOp(Bytecodes::_iand, value, mask));
}
} else if (type == T_BYTE) {
check_boolean = true;
}
StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
append(result);
_memory->store_value(value);
if (type == T_OBJECT && is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_checkcasts()) {
result->set_profiled_method(method());
result->set_profiled_bci(bci());
result->set_should_profile(true);
}
}
}
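// Implement the untyped stack manipulation bytecodes (pop, dup, swap and
// friends) with raw pops and pushes on the value stack.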
void GraphBuilder::stack_op(Bytecodes::Code code) {
switch (code) {
case Bytecodes::_pop:
{ state()->raw_pop();
}
break;
case Bytecodes::_pop2:
{ state()->raw_pop();
state()->raw_pop();
}
break;
case Bytecodes::_dup:
{ Value w = state()->raw_pop();
state()->raw_push(w);
state()->raw_push(w);
}
break;
case Bytecodes::_dup_x1:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
state()->raw_push(w1);
state()->raw_push(w2);
state()->raw_push(w1);
}
break;
case Bytecodes::_dup_x2:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
Value w3 = state()->raw_pop();
state()->raw_push(w1);
state()->raw_push(w3);
state()->raw_push(w2);
state()->raw_push(w1);
}
break;
case Bytecodes::_dup2:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
state()->raw_push(w2);
state()->raw_push(w1);
state()->raw_push(w2);
state()->raw_push(w1);
}
break;
case Bytecodes::_dup2_x1:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
Value w3 = state()->raw_pop();
state()->raw_push(w2);
state()->raw_push(w1);
state()->raw_push(w3);
state()->raw_push(w2);
state()->raw_push(w1);
}
break;
case Bytecodes::_dup2_x2:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
Value w3 = state()->raw_pop();
Value w4 = state()->raw_pop();
state()->raw_push(w2);
state()->raw_push(w1);
state()->raw_push(w4);
state()->raw_push(w3);
state()->raw_push(w2);
state()->raw_push(w1);
}
break;
case Bytecodes::_swap:
{ Value w1 = state()->raw_pop();
Value w2 = state()->raw_pop();
state()->raw_push(w1);
state()->raw_push(w2);
}
break;
default:
ShouldNotReachHere();
break;
}
}
void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
Value y = pop(type);
Value x = pop(type);
Value res = new ArithmeticOp(code, x, y, state_before);
// Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
res = append(res);
res = round_fp(res);
push(type, res);
}
void GraphBuilder::negate_op(ValueType* type) {
push(type, append(new NegateOp(pop(type))));
}
void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
Value s = ipop();
Value x = pop(type);
// try to simplify
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
if (CanonicalizeNodes && code == Bytecodes::_iushr) {
// pattern: x >>> s
IntConstant* s1 = s->type()->as_IntConstant();
if (s1 != NULL) {
// pattern: x >>> s1, with s1 constant
ShiftOp* l = x->as_ShiftOp();
if (l != NULL && l->op() == Bytecodes::_ishl) {
// pattern: (a << b) >>> s1
IntConstant* s0 = l->y()->type()->as_IntConstant();
if (s0 != NULL) {
// pattern: (a << s0) >>> s1
const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
if (s0c == s1c) {
if (s0c == 0) {
// pattern: (a << 0) >>> 0 => simplify to: a
ipush(l->x());
} else {
// pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
const int m = (1 << (BitsPerInt - s0c)) - 1;
Value s = append(new Constant(new IntConstant(m)));
ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
}
return;
}
}
}
}
}
// could not simplify
push(type, append(new ShiftOp(code, x, s)));
}
void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
Value y = pop(type);
Value x = pop(type);
push(type, append(new LogicOp(code, x, y)));
}
void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
ValueStack* state_before = copy_state_before();
Value y = pop(type);
Value x = pop(type);
ipush(append(new CompareOp(code, x, y, state_before)));
}
void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}
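// Handle iinc (and wide iinc): load the local, add the sign-extended
// immediate delta and store the result back into the same local.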
void GraphBuilder::increment() {
int index = stream()->get_index();
int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
load_local(intType, index);
ipush(append(new Constant(new IntConstant(delta))));
arithmetic_op(intType, Bytecodes::_iadd);
store_local(intType, index);
}
void GraphBuilder::_goto(int from_bci, int to_bci) {
Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
if (is_profiling()) {
compilation()->set_would_profile(true);
x->set_profiled_bci(bci());
if (profile_branches()) {
x->set_profiled_method(method());
x->set_should_profile(true);
}
}
append(x);
}
void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
assert(i->as_Goto() == NULL ||
(i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
(i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
"safepoint state of Goto returned by canonicalizer incorrect");
if (is_profiling()) {
If* if_node = i->as_If();
if (if_node != NULL) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
// At level 2 we need the proper bci to count backedges
if_node->set_profiled_bci(bci());
if (profile_branches()) {
// Successors can be rotated by the canonicalizer, check for this case.
if_node->set_profiled_method(method());
if_node->set_should_profile(true);
if (if_node->tsux() == fsux) {
if_node->set_swapped(true);
}
}
return;
}
// Check if this If was reduced to Goto.
Goto *goto_node = i->as_Goto();
if (goto_node != NULL) {
compilation()->set_would_profile(true);
goto_node->set_profiled_bci(bci());
if (profile_branches()) {
goto_node->set_profiled_method(method());
goto_node->set_should_profile(true);
// Find out which successor is used.
if (goto_node->default_sux() == tsux) {
goto_node->set_direction(Goto::taken);
} else if (goto_node->default_sux() == fsux) {
goto_node->set_direction(Goto::not_taken);
} else {
ShouldNotReachHere();
}
}
return;
}
}
}
void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
Value y = append(new Constant(intZero));
ValueStack* state_before = copy_state_before();
Value x = ipop();
if_node(x, cond, y, state_before);
}
void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
Value y = append(new Constant(objectNull));
ValueStack* state_before = copy_state_before();
Value x = apop();
if_node(x, cond, y, state_before);
}
void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
ValueStack* state_before = copy_state_before();
Value y = pop(type);
Value x = pop(type);
if_node(x, cond, y, state_before);
}
void GraphBuilder::jsr(int dest) {
// We only handle well-formed jsrs (those which are "block-structured").
// If the bytecodes are strange (jumping out of a jsr block) then we
// might end up trying to re-parse a block containing a jsr which
// has already been activated. Watch for this case and bail out.
for (ScopeData* cur_scope_data = scope_data();
cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
cur_scope_data = cur_scope_data->parent()) {
if (cur_scope_data->jsr_entry_bci() == dest) {
BAILOUT("too-complicated jsr/ret structure");
}
}
push(addressType, append(new Constant(new AddressConstant(next_bci()))));
if (!try_inline_jsr(dest)) {
return; // bailed out while parsing and inlining subroutine
}
}
void GraphBuilder::ret(int local_index) {
if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");
if (local_index != scope_data()->jsr_return_address_local()) {
BAILOUT("can not handle complicated jsr/ret constructs");
}
// Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
append(new Goto(scope_data()->jsr_continuation(), false));
}
void GraphBuilder::table_switch() {
Bytecode_tableswitch sw(stream());
const int l = sw.length();
if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
Value key = append(new Constant(new IntConstant(sw.low_key())));
BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors
BlockList* sux = new BlockList(l + 1, NULL);
int i;
bool has_bb = false;
for (i = 0; i < l; i++) {
sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
if (sw.dest_offset_at(i) < 0) has_bb = true;
}
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {
for (i = 0; i < l; i++) {
if (sux->at(i) == res->as_Goto()->sux_at(0)) {
assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
}
}
}
#endif
}
}
void GraphBuilder::lookup_switch() {
Bytecode_lookupswitch sw(stream());
const int l = sw.number_of_pairs();
if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
// total of 2 successors => use If instead of switch
// Note: This code should go into the canonicalizer as soon as it
// can handle canonicalized forms that contain more than one node.
// simplify to If
LookupswitchPair pair = sw.pair_at(0);
Value key = append(new Constant(new IntConstant(pair.match())));
BlockBegin* tsux = block_at(bci() + pair.offset());
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors & keys
BlockList* sux = new BlockList(l + 1, NULL);
intArray* keys = new intArray(l, l, 0);
int i;
bool has_bb = false;
for (i = 0; i < l; i++) {
LookupswitchPair pair = sw.pair_at(i);
if (pair.offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + pair.offset()));
keys->at_put(i, pair.match());
}
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {
for (i = 0; i < l; i++) {
if (sux->at(i) == res->as_Goto()->sux_at(0)) {
assert(res->as_Goto()->is_safepoint() == sw.pair_at(i).offset() < 0, "safepoint state of Goto returned by canonicalizer incorrect");
}
}
}
#endif
}
}
void GraphBuilder::call_register_finalizer() {
// If the receiver requires finalization then emit code to perform
// the registration on return.
// Gather some type information about the receiver
Value receiver = state()->local_at(0);
assert(receiver != NULL, "must have a receiver");
ciType* declared_type = receiver->declared_type();
ciType* exact_type = receiver->exact_type();
if (exact_type == NULL &&
receiver->as_Local() &&
receiver->as_Local()->java_index() == 0) {
ciInstanceKlass* ik = compilation()->method()->holder();
if (ik->is_final()) {
exact_type = ik;
} else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) {
// test class is leaf class
compilation()->dependency_recorder()->assert_leaf_type(ik);
exact_type = ik;
} else {
declared_type = ik;
}
}
// see if we know statically that registration isn't required
bool needs_check = true;
if (exact_type != NULL) {
needs_check = exact_type->as_instance_klass()->has_finalizer();
} else if (declared_type != NULL) {
ciInstanceKlass* ik = declared_type->as_instance_klass();
if (!Dependencies::has_finalizable_subclass(ik)) {
compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik);
needs_check = false;
}
}
if (needs_check) {
// Perform the registration of finalizable objects.
ValueStack* state_before = copy_state_for_exception();
load_local(objectType, 0);
append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init,
state()->pop_arguments(1),
true, state_before, true));
}
}
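// Emit the return of value x (NULL for void returns): normalize sub-int
// return values, release the receiver lock for synchronized methods, and,
// when inlining, turn the return into a Goto to the continuation block.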
void GraphBuilder::method_return(Value x, bool ignore_return) {
if (RegisterFinalizersAtInit &&
method()->intrinsic_id() == vmIntrinsics::_Object_init) {
call_register_finalizer();
}
// The conditions for a memory barrier are described in Parse::do_exits().
bool need_mem_bar = false;
if (method()->name() == ciSymbols::object_initializer_name() &&
(scope()->wrote_final() ||
(AlwaysSafeConstructors && scope()->wrote_fields()) ||
(support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()))) {
need_mem_bar = true;
}
BasicType bt = method()->return_type()->basic_type();
switch (bt) {
case T_BYTE:
{
Value shift = append(new Constant(new IntConstant(24)));
x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
break;
}
case T_SHORT:
{
Value shift = append(new Constant(new IntConstant(16)));
x = append(new ShiftOp(Bytecodes::_ishl, x, shift));
x = append(new ShiftOp(Bytecodes::_ishr, x, shift));
break;
}
case T_CHAR:
{
Value mask = append(new Constant(new IntConstant(0xFFFF)));
x = append(new LogicOp(Bytecodes::_iand, x, mask));
break;
}
case T_BOOLEAN:
{
Value mask = append(new Constant(new IntConstant(1)));
x = append(new LogicOp(Bytecodes::_iand, x, mask));
break;
}
default:
break;
}
// Check to see whether we are inlining. If so, Return
// instructions become Gotos to the continuation point.
if (continuation() != NULL) {
int invoke_bci = state()->caller_state()->bci();
if (x != NULL && !ignore_return) {
ciMethod* caller = state()->scope()->caller()->method();
Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci);
if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) {
ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type();
if (declared_ret_type->is_klass() && x->exact_type() == NULL &&
x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) {
x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before()));
}
}
}
assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
if (compilation()->env()->dtrace_method_probes()) {
// Report exit from inline methods
Values* args = new Values(1);
args->push(append(new Constant(new MethodConstant(method()))));
append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args));
}
// If the inlined method is synchronized, the monitor must be
// released before we jump to the continuation block.
if (method()->is_synchronized()) {
assert(state()->locks_size() == 1, "receiver must be locked here");
monitorexit(state()->lock_at(0), SynchronizationEntryBCI);
}
if (need_mem_bar) {
append(new MemBar(lir_membar_storestore));
}
// State at end of inlined method is the state of the caller
// without the method parameters on stack, including the
// return value, if any, of the inlined method on operand stack.
set_state(state()->caller_state()->copy_for_parsing());
if (x != NULL) {
if (!ignore_return) {
state()->push(x->type(), x);
}
if (profile_return() && x->type()->is_object_kind()) {
ciMethod* caller = state()->scope()->method();
profile_return_type(x, method(), caller, invoke_bci);
}
}
Goto* goto_callee = new Goto(continuation(), false);
// See whether this is the first return; if so, store off some
// of the state for later examination
if (num_returns() == 0) {
set_inline_cleanup_info();
}
// The current bci() is in the wrong scope, so use the bci() of
// the continuation point.
append_with_bci(goto_callee, scope_data()->continuation()->bci());
incr_num_returns();
return;
}
state()->truncate_stack(0);
if (method()->is_synchronized()) {
// perform the unlocking before exiting the method
Value receiver;
if (!method()->is_static()) {
receiver = _initial_state->local_at(0);
} else {
receiver = append(new Constant(new ClassConstant(method()->holder())));
}
append_split(new MonitorExit(receiver, state()->unlock()));
}
if (need_mem_bar) {
append(new MemBar(lir_membar_storestore));
}
assert(!ignore_return, "Ignoring return value works only for inlining");
append(new Return(x));
}
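// Wrap a constant field value in a Constant instruction. Returns NULL if the
// value is invalid or is an object/array that should not be embedded as a
// compile-time constant.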
Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) {
if (!field_value.is_valid()) return NULL;
BasicType field_type = field_value.basic_type();
ValueType* value = as_ValueType(field_value);
// Attach dimension info to stable arrays.
if (FoldStableValues &&
field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) {
ciArray* array = field_value.as_object()->as_array();
jint dimension = field->type()->as_array_klass()->dimension();
value = new StableArrayConstant(array, dimension);
}
switch (field_type) {
case T_ARRAY:
case T_OBJECT:
if (field_value.as_object()->should_be_constant()) {
return new Constant(value);
}
return NULL; // Not a constant.
default:
return new Constant(value);
}
}
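// Handle getstatic, putstatic, getfield and putfield: fold loads of
// compile-time constant fields, track writes to final/volatile fields for
// the required memory barriers, and emit patching accesses when the field
// or its holder is not yet resolved.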
void GraphBuilder::access_field(Bytecodes::Code code) {
bool will_link;
ciField* field = stream()->get_field(will_link);
ciInstanceKlass* holder = field->holder();
BasicType field_type = field->type()->basic_type();
ValueType* type = as_ValueType(field_type);
// call will_link again to determine if the field is valid.
const bool needs_patching = !holder->is_loaded() ||
!field->will_link(method(), code) ||
PatchALot;
ValueStack* state_before = NULL;
if (!holder->is_initialized() || needs_patching) {
// save state before instruction for debug info when
// deoptimization happens during patching
state_before = copy_state_before();
}
Value obj = NULL;
if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
if (state_before != NULL) {
// build a patching constant
obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before);
} else {
obj = new Constant(new InstanceConstant(holder->java_mirror()));
}
}
if (field->is_final() && (code == Bytecodes::_putfield)) {
scope()->set_wrote_final();
}
if (code == Bytecodes::_putfield) {
scope()->set_wrote_fields();
if (field->is_volatile()) {
scope()->set_wrote_volatile();
}
}
const int offset = !needs_patching ? field->offset() : -1;
switch (code) {
case Bytecodes::_getstatic: {
// check for compile-time constants, i.e., initialized static final fields
Value constant = NULL;
if (field->is_static_constant() && !PatchALot) {
ciConstant field_value = field->constant_value();
assert(!field->is_stable() || !field_value.is_null_or_zero(),
"stable static w/ default value shouldn't be a constant");
constant = make_constant(field_value, field);
}
if (constant != NULL) {
push(type, append(constant));
} else {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
push(type, append(new LoadField(append(obj), offset, field, true,
state_before, needs_patching)));
}
break;
}
case Bytecodes::_putstatic: {
Value val = pop(type);
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
if (field->type()->basic_type() == T_BOOLEAN) {
Value mask = append(new Constant(new IntConstant(1)));
val = append(new LogicOp(Bytecodes::_iand, val, mask));
}
append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
break;
}
case Bytecodes::_getfield: {
// Check for compile-time constants, i.e., trusted final non-static fields.
Value constant = NULL;
obj = apop();
ObjectType* obj_type = obj->type()->as_ObjectType();
if (field->is_constant() && obj_type->is_constant() && !PatchALot) {
ciObject* const_oop = obj_type->constant_value();
if (!const_oop->is_null_object() && const_oop->is_loaded()) {
ciConstant field_value = field->constant_value_of(const_oop);
if (field_value.is_valid()) {
constant = make_constant(field_value, field);
// For CallSite objects add a dependency for invalidation of the optimization.
if (field->is_call_site_target()) {
ciCallSite* call_site = const_oop->as_call_site();
if (!call_site->is_fully_initialized_constant_call_site()) {
ciMethodHandle* target = field_value.as_object()->as_method_handle();
dependency_recorder()->assert_call_site_target_value(call_site, target);
}
}
}
}
}
if (constant != NULL) {
push(type, append(constant));
} else {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
Value replacement = !needs_patching ? _memory->load(load) : load;
if (replacement != load) {
assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked");
// Writing an (integer) value to a boolean, byte, char or short field includes an implicit narrowing
// conversion. Emit an explicit conversion here to get the correct field value after the write.
BasicType bt = field->type()->basic_type();
switch (bt) {
case T_BOOLEAN:
case T_BYTE:
replacement = append(new Convert(Bytecodes::_i2b, replacement, as_ValueType(bt)));
break;
case T_CHAR:
replacement = append(new Convert(Bytecodes::_i2c, replacement, as_ValueType(bt)));
break;
case T_SHORT: