/*
* Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"
// Construct an empty ConnectionGraph for compilation 'C'.  'invocation'
// counts how many times escape analysis has been invoked for this
// compilation (EA may be repeated after IGVN removes dead allocations).
// Pre-registers the two JavaObject nodes every graph needs:
// phantom_obj (unknown object, GlobalEscape) and null_obj (NoEscape).
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),          // stays true until compute_escape() completes
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    // The narrow null constant shares the PointsTo node of the wide null.
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}
// EA brings benefits only when the code has allocations and/or locks,
// which are represented by ideal macro nodes. Scan the macro list for
// anything escape analysis could potentially optimize.
bool ConnectionGraph::has_candidates(Compile *C) {
  const int macro_cnt = C->macro_count();
  for (int idx = 0; idx < macro_cnt; idx++) {
    Node* mn = C->macro_node(idx);
    if (mn->is_Allocate()) {
      return true; // Allocation might become scalar replaceable.
    }
    if (mn->is_Lock()) {
      // Locks on parameters or constants cannot be elided; anything
      // else is a candidate.
      Node* locked = mn->as_Lock()->obj_node()->uncast();
      if (!locked->is_Parm() && !locked->is_Con()) {
        return true;
      }
    }
    if (mn->is_CallStaticJava() && mn->as_CallStaticJava()->is_boxing_method()) {
      return true; // Boxing call might be eliminated.
    }
  }
  return false;
}
// Driver: build a ConnectionGraph for compilation 'C', run escape
// analysis on it, and attach the graph to the compilation when any
// non-escaping objects were discovered.
void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Pre-create ConP#NULL and ConN#NULL so the ConnectionGraph constructor
  // finds slots reserved for them in ConnectionGraph::_nodes[].
  Node* null_oop  = igvn->zerocon(T_OBJECT);
  Node* null_noop = igvn->zerocon(T_NARROWOOP);

  // Number this EA invocation (EA may run more than once per compilation).
  int inv = (C->congraph() != NULL) ? (C->congraph()->_invocation + 1) : 0;
  ConnectionGraph* cg = new(C->comp_arena()) ConnectionGraph(C, igvn, inv);

  // Perform escape analysis.
  if (cg->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(cg);
  }

  // Cleanup: drop the null constants from the hash table if unused.
  if (null_oop->outcnt() == 0) {
    igvn->hash_delete(null_oop);
  }
  if (null_noop->outcnt() == 0) {
    igvn->hash_delete(null_noop);
  }
}
// Main EA pipeline:
//  1. populate the Connection Graph with PointsTo nodes for all live ideal nodes,
//  2. complete the graph by propagating references to java objects,
//  3. adjust scalar-replaceable state of non-escaping allocations,
//  4. optimize the ideal graph using the computed escape information,
//  5. split unique memory types for scalar replaceable allocations,
// then annotate safepoints/calls with escape info for the runtime.
// Returns true if at least one object does not escape globally.
bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*> ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*> oop_fields_worklist;
  GrowableArray<SafePointNode*> sfn_worklist;
  GrowableArray<MergeMemNode*> mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and java static calls results are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        storestore_worklist.append(n->as_MemBarStoreStore());
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so if one of its input is non
        // escaping, we can record a unique type
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    // Push users so the whole live graph is visited exactly once
    // (ideal_nodes is a Unique_Node_List).
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars has edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif

  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist);
  }

  // Only objects still scalar replaceable after propagation are candidates.
  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allcations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);
#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}
// Returns true if there is an object in the scope of sfn that does not
// escape globally.
bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
  Compile* C = _compile;
  // The capability checks do not change while walking the JVM state chain,
  // so evaluate them once up front.
  const bool check_locals = C->env()->should_retain_local_variables() ||
                            C->env()->jvmti_can_walk_any_space() ||
                            DeoptimizeObjectsALot;
  const bool check_monitors = C->env()->jvmti_can_get_owned_monitor_info() ||
                              C->env()->jvmti_can_walk_any_space() ||
                              DeoptimizeObjectsALot;
  for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (check_locals) {
      // Jvmti agents can access locals. Must provide info about local objects at runtime.
      const int loc_cnt = jvms->loc_size();
      for (int loc = 0; loc < loc_cnt; loc++) {
        if (not_global_escape(sfn->local(jvms, loc))) {
          return true;
        }
      }
    }
    if (check_monitors) {
      // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
      const int mon_cnt = jvms->nof_monitors();
      for (int mon = 0; mon < mon_cnt; mon++) {
        Node* obj = sfn->monitor_obj(jvms, mon);
        if (obj != NULL && not_global_escape(obj)) {
          return true;
        }
      }
    }
  }
  return false;
}
// Returns true if at least one of the arguments to the call is an object
// that does not escape globally.
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
  ciMethod* callee = call->method();
  if (callee != NULL) {
    // Known Java method: inspect the actual argument nodes.
    const uint limit = TypeFunc::Parms + callee->arg_size();
    for (uint arg = TypeFunc::Parms; arg < limit; arg++) {
      if (not_global_escape(call->in(arg))) {
        return true;
      }
    }
    return false;
  }
  // Runtime stub call. process_call_arguments() assumes that all
  // arguments escape globally, so any oop in the signature counts -
  // except for uncommon traps, through which no arg escapes.
  const char* stub_name = call->as_CallStaticJava()->_name;
  assert(stub_name != NULL, "no name");
  if (strcmp(stub_name, "uncommon_trap") == 0) {
    return false;
  }
  const TypeTuple* sig = call->tf()->domain();
  for (uint i = TypeFunc::Parms; i < sig->cnt(); i++) {
    if (sig->field_at(i)->isa_oopptr() != NULL) {
      return true;
    }
  }
  return false;
}
// Utility function for nodes that load an object: register the load as a
// LocalVar in the graph and connect it to its address input.
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* load_t = _igvn->type(n);
  if (load_t->make_ptr() == NULL) {
    return; // Not a pointer load - nothing to record.
  }
  Node* address = n->in(MemNode::Address);
#ifdef ASSERT
  if (address->is_AddP()) {
    // Address through an AddP must be an (unregistered or oop) field.
    assert((ptnode_adr(address->_idx) == NULL ||
            ptnode_adr(address->_idx)->as_Field()->is_oop()), "sanity");
  } else {
    assert(_igvn->type(address)->isa_rawptr(), "sanity");
  }
#endif
  add_local_var_and_edge(n, PointsToNode::NoEscape,
                         address, delayed_worklist);
}
// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.  Nodes whose edges depend on inputs that may
// not be registered yet are pushed on 'delayed_worklist'; the edges are
// created later by add_final_edges().  Called once per ideal node.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL) {
    return; // No need to redefine PointsTo node during first iteration.
  }
  int opcode = n->Opcode();
  // Give the GC barrier implementation first chance to handle the node.
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0) {
          return; // Skip uncommon traps
        }
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj) {
    return; // Skip predefined nodes.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      // Pointer conjured from an integer: treat as the unknown object.
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      // Casts/encodings just forward the reference of their input.
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      // Incoming parameter: unknown object.
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used in only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      // These also load the old value; record that first, then
      // fall through to handle the stored value like a plain store.
      add_objload_to_connection_graph(n, delayed_worklist);
      // fall-through
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // String intrinsics: array arguments escape only as arguments.
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during first iteration because some could be
      // not defined yet, defer to final step.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;
          add_local_var(n, PointsToNode::GlobalEscape);
          delayed_worklist->push(n);
          break;
        }
      }
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}
// Add final simple edges to graph.  Processes nodes that were pushed on
// the delayed worklist by add_node_to_connection_graph() because their
// inputs were not yet registered during the first pass.  In _verify mode
// (debug only) it re-runs to check no new edges would be created.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  // Give the GC barrier implementation first chance to handle the node.
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      // Casts/encodings forward their input's reference.
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      // The CMove result may point to any of its data inputs.
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue;  // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue;  // ignore top or inputs which go back this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type");
      // The Phi result may point to any of its inputs.
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL) {
          continue;  // ignore NULL
        }
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n) {
          continue;  // ignore top or inputs which go back this node
        }
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
             n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
             "Unexpected node type");
      // Treat Return value as LocalVar with GlobalEscape escape state.
      add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL);
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN:{
      // These also load the old value from the address, then
      // fall through to handle the stored value like a plain store.
      assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type");
      add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      // fall-through
    }
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:{
      add_final_edges_unsafe_access(n, opcode);
      break;
    }
    case Op_AryEq:
    case Op_CountPositives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsic do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      for (uint i = 0; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in != nullptr) {
          const Type* at = _igvn->type(in);
          if (!at->isa_ptr()) continue;
          if (in->is_AddP()) {
            in = get_addp_base(in);
          }
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != nullptr, "should be defined already");
          set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}
// First-pass handling of stores and atomic read-modify-write accesses:
// decide whether the node needs delayed edge creation, and assert that
// anything else is one of the recognized benign patterns.
void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* address = n->in(MemNode::Address);
  const Type* addr_t = _igvn->type(address)->make_ptr();
  if (addr_t == NULL) {
    return; // skip dead nodes
  }
  const bool is_plain_store = (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass);
  if (addr_t->isa_oopptr() ||
      (is_plain_store && addr_t == TypeRawPtr::NOTNULL && is_captured_store_address(address))) {
    // Normal oop access, or a raw store captured by an Initialize node.
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(address->is_AddP(), "expecting an AddP");
    if (addr_t == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(address->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
    return;
  }
  // Ignore copy the displaced header to the BoxNode (OSR compilation).
  if (address->is_BoxLock()) {
    return;
  }
  // Stored value escapes in unsafe access.
  if (opcode == Op_StoreP && addr_t->isa_rawptr()) {
    delayed_worklist->push(n); // Process unsafe access later.
    return;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
}
// Final-pass handling of stores and atomic read-modify-write accesses:
// connect the stored value to the field being written, or mark it as
// globally escaping for a raw unsafe store. Returns true when handled.
bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* address = n->in(MemNode::Address);
  const Type* addr_t = _igvn->type(address)->make_ptr();
#ifdef ASSERT
  if (addr_t == NULL) {
    n->dump(1);
    assert(addr_t != NULL, "dead node should not be on list");
    return true;
  }
#endif
  const bool is_plain_store = (opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass);
  if (addr_t->isa_oopptr() ||
      (is_plain_store && addr_t == TypeRawPtr::NOTNULL && is_captured_store_address(address))) {
    // Point Address to Value
    PointsToNode* addr_ptn = ptnode_adr(address->_idx);
    assert(addr_ptn != NULL &&
           addr_ptn->as_Field()->is_oop(), "node should be registered");
    PointsToNode* val_ptn = ptnode_adr(n->in(MemNode::ValueIn)->_idx);
    assert(val_ptn != NULL, "node should be registered");
    add_edge(addr_ptn, val_ptn);
    return true;
  }
  if (opcode == Op_StoreP && addr_t->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    PointsToNode* val_ptn = ptnode_adr(n->in(MemNode::ValueIn)->_idx);
    assert(val_ptn != NULL, "node should be registered");
    set_escape_state(val_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
    // Add edge to object for unsafe access with offset.
    PointsToNode* addr_ptn = ptnode_adr(address->_idx);
    assert(addr_ptn != NULL, "node should be registered");
    if (addr_ptn->is_Field()) {
      assert(addr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(addr_ptn, val_ptn);
    }
    return true;
  }
#ifdef ASSERT
  n->dump(1);
  assert(false, "not unsafe");
#endif
  return false;
}
// Create a PointsTo node for a call that returns a pointer, choosing the
// node kind and initial escape/scalar-replaceable state from what is
// known about the callee (allocation, boxing, analyzed Java method,
// runtime stub, or unknown dynamic call).
void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    NOT_PRODUCT(const char* nsr_reason = "");
    if (call->is_AllocateArray()) {
      if (!kt->isa_aryklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0) {
          // Not scalar replaceable if the length is not constant.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a non-constant length");
        } else if (length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is too big.
          scalar_replaceable = false;
          NOT_PRODUCT(nsr_reason = "has a length that is too big");
        }
      }
    } else {  // Allocate instance
      if (!kt->isa_instklassptr()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        const TypeInstKlassPtr* ikt = kt->is_instklassptr();
        ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
        // Threads, References, uninstantiable classes and finalizable
        // objects are always treated as globally escaping.
        if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
            ik->is_subclass_of(_compile->env()->Reference_klass()) ||
            !ik->can_be_instantiated() ||
            ik->has_finalizer()) {
          es = PointsToNode::GlobalEscape;
        } else {
          int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
          if (nfields > EliminateAllocationFieldsLimit) {
            // Not scalar replaceable if there are too many fields.
            scalar_replaceable = false;
            NOT_PRODUCT(nsr_reason = "has too many fields");
          }
        }
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and does not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated non-escaped object.
      add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // it's fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // An other type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}
// Set the escape state of a call's oop arguments according to what the
// callee can do with them:
//  - leaf/stub calls (arraycopy stubs, crypto/hash intrinsics, ...) keep
//    arguments at ArgEscape (NoEscape for validated ArrayCopy variants);
//  - static Java calls consult the bytecode escape analyzer when available;
//  - anything else conservatively marks all oop arguments GlobalEscape.
void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeafVector:
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
          continue; // ignore non-pointer arguments
        }
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting an Ptr");
          // Does this argument's type contain oops (instance, or object array)?
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_instptr() ||
                               (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL)));
          if (i == TypeFunc::Parms) {
            // First pointer argument of an arraycopy stub is the source.
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          // Catch any leaf call not on the known-safe stub list so a new
          // stub gets an explicit EA review instead of silent mis-analysis.
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            // Validated/basic-clone copies are fully understood by C2, so
            // their arguments need not be treated as escaping.
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms; // analyzer uses 0-based argument index
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
      // NOTE: no break here when call_analyzer == NULL -- intentional
      // fall-through into the conservative default case below.
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
        }
      }
    }
  }
}
// Finish Graph construction: repeatedly propagate JavaObject references
// and escape states until a fixpoint is reached. The loop is bounded both
// by GRAPH_BUILD_ITER_LIMIT passes and by the EscapeAnalysisTimeout wall
// clock; returns false on bailout or when no non-escaping objects remain.
bool ConnectionGraph::complete_connection_graph(
    GrowableArray<PointsToNode*>& ptnodes_worklist,
    GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
    GrowableArray<JavaObjectNode*>& java_objects_worklist,
    GrowableArray<FieldNode*>& oop_fields_worklist) {
  // Normally only 1-3 passes needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something did go wrong and
  // bailout Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag.
#define GRAPH_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer build_time;
  build_time.start();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1; // force at least one pass of the inner while loop
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Each 4 iterations calculate how much time it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          // Bail out early if the extrapolated finish time exceeds the limit.
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  build_time.stop();
  _build_time = build_time.seconds();
  _build_iterations = iterations;

  // Bailout if passed limits.
  if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
      C->log()->text("%s", timeout ? "time" : "iterations");
      C->log()->end_elem(" limit'");
    }
    assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
           _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
    // Possible infinite build_connection_graph loop,
    // bailout (no changes to ideal graph were made).
    return false;
  }
#undef GRAPH_BUILD_ITER_LIMIT

  // Find fields initialized by NULL for non-escaping Allocations.
  int non_escaped_length = non_escaped_allocs_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    PointsToNode::EscapeState es = ptn->escape_state();
    assert(es <= PointsToNode::ArgEscape, "sanity");
    if (es == PointsToNode::NoEscape) {
      if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to NULL object does not change escape states
        // since it does not escape. Also no fields are added to NULL object.
        add_java_object_edges(null_obj, false);
      }
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != NULL)
        ini->set_does_not_escape();
    }
  }
  return true; // Finished graph construction.
}
// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
// Returns false when every allocation in non_escaped_allocs_worklist
// has globally escaped (no further analysis is useful).
bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
                                               GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
  GrowableArray<PointsToNode*> escape_worklist;
  // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
  int ptnodes_length = ptnodes_worklist.length();
  for (int next = 0; next < ptnodes_length; ++next) {
    PointsToNode* ptn = ptnodes_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::ArgEscape ||
        ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
      escape_worklist.push(ptn);
    }
  }
  // Set escape states to referenced nodes (edges list).
  while (escape_worklist.length() > 0) {
    PointsToNode* ptn = escape_worklist.pop();
    PointsToNode::EscapeState es = ptn->escape_state();
    PointsToNode::EscapeState field_es = ptn->fields_escape_state();
    if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
        es >= PointsToNode::ArgEscape) {
      // GlobalEscape or ArgEscape state of field means it has unknown value.
      if (add_edge(ptn, phantom_obj)) {
        // New edge was added
        add_field_uses_to_worklist(ptn->as_Field());
      }
    }
    for (EdgeIterator i(ptn); i.has_next(); i.next()) {
      PointsToNode* e = i.get();
      if (e->is_Arraycopy()) {
        assert(ptn->arraycopy_dst(), "sanity");
        // Propagate only fields escape state through arraycopy edge.
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  // Walk backwards so delete_at() does not disturb the indices still to visit.
  for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_allocs_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values_phantom(ptn);
    }
  }
  return (non_escaped_allocs_worklist.length() > 0);
}
// Add all references to JavaObject node by walking over all uses.
// Returns the number of new edges added. When populate_worklist is true,
// _worklist is first seeded from jobj's uses; otherwise the caller has
// already pushed the relevant nodes (e.g. for phantom_obj / null_obj).
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy()) {
        continue;
      }
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  // NOTE: _worklist.length() is re-evaluated each iteration on purpose --
  // the add_*_to_worklist() calls below may append new entries as we go.
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) { // NULL object does not have field edges
        continue;
      }
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj)) {
      continue; // No new edge added, there was such edge already.
    }
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) { // NULL object does not have field edges
              continue;
            }
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added new edge to stored in field values.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  // Drain the worklist and its membership set for the next caller.
  _worklist.clear();
  _in_worklist.reset();
  return new_edges;
}
// Put on worklist all related field nodes: the field's own uses plus, for
// every base of the field, Field nodes with the same base and offset. If a
// base was the source of an arraycopy, the fields of the arraycopy's
// destination objects are pushed too, since values stored into the source
// are observable through loads from the destination.
// (Fix: removed the unused local 'int offset = field->offset();' --
// add_fields_to_worklist() reads the offset itself.)
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}
// Put on worklist all Field nodes reachable through 'base' that may alias
// 'field': same offset, or either offset is OffsetBot (unknown/any offset).
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    // For a LocalVar base the related fields are found among its uses
    // (tagged as base-uses).
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        // OffsetBot on either side means "could be any offset" -- conservatively alias.
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      // For a JavaObject base the fields are its outgoing edges.
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop()) {
          continue;
        }
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}
// Find fields which have unknown value. A non-escaped field whose bases are
// neither Allocate results (handled later by find_init_values_*) nor the
// NULL object gets an edge to phantom_obj to model its unknown content.
// Returns the number of edges added (0 or 1).
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  for (BaseIterator it(field); it.has_next(); it.next()) {
    PointsToNode* b = it.get();
    if (!b->is_JavaObject()) {
      continue;
    }
    if (b->ideal_node()->is_Allocate()) {
      // Skip Allocate's fields which will be processed later.
      return 0;
    }
    assert(b == null_obj, "only NULL ptr base expected here");
  }
  if (!add_edge(field, phantom_obj)) {
    return 0; // Edge was present already.
  }
  // New edge was added: revisit all loads/aliases of this field.
  add_field_uses_to_worklist(field);
  return 1;
}
// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
Node* alloc = pta->ideal_node();
// Do nothing for Allocate nodes since its fields values are
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.61 Sekunden
(vorverarbeitet)
¤
|
Haftungshinweis
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.
|