/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}
/// Support for intrinsics.
// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
class IntrinsicDescPair {
 private:
  ciMethod* _m;
  bool _is_virtual;
 public:
  IntrinsicDescPair(ciMethod* m, bool is_virtual) : _m(m), _is_virtual(is_virtual) {}
  static int compare(IntrinsicDescPair* const& key, CallGenerator* const& elt) {
    ciMethod* m = elt->method();
    ciMethod* key_m = key->_m;
    if (key_m < m)      return -1;
    else if (key_m > m) return 1;
    else {
      bool is_virtual = elt->is_virtual();
      bool key_virtual = key->_is_virtual;
      if (key_virtual < is_virtual)      return -1;
      else if (key_virtual > is_virtual) return 1;
      else                               return 0;
    }
  }
};

int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual, bool& found) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics.length(); i++) {
    CallGenerator* cg1 = _intrinsics.at(i-1);
    CallGenerator* cg2 = _intrinsics.at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  IntrinsicDescPair pair(m, is_virtual);
  return _intrinsics.find_sorted<IntrinsicDescPair*, IntrinsicDescPair::compare>(&pair, found);
}
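// Register a new intrinsic CallGenerator, keeping the _intrinsics list sorted
// (by ciMethod address, then is_virtual) so lookups can use binary search.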
void Compile::register_intrinsic(CallGenerator* cg) {
  bool found = false;
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual(), found);
assert(!found, "registering twice");
_intrinsics.insert_before(index, cg);
assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}
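// Find a registered intrinsic for the given method, lazily creating and
// registering one for intrinsic IDs that are well-known in the runtime.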
CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics.length() > 0) {
    bool found = false;
    int index = intrinsic_insertion_index(m, is_virtual, found);
    if (found) {
      return _intrinsics.at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}
// Compile::make_vm_intrinsic is defined in library_call.cpp.

#ifndef PRODUCT
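// Record one use of an intrinsic in the per-ID histogram. The flags note,
// for example, whether the intrinsic worked or was disabled and whether it
// was reached through a virtual call; the vmIntrinsics::_none slot holds totals.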
bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[as_int(id)];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[as_int(id)] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[as_int(vmIntrinsics::_none)] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[as_int(id)] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[as_int(vmIntrinsics::_none)] |= (jubyte) flags;
  return changed;
}
void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[as_int(vmIntrinsics::_none)];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (auto id : EnumRange<vmIntrinsicID>{}) {
    int   flags = _intrinsic_hist_flags[as_int(id)];
    juint count = _intrinsic_hist_count[as_int(id)];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[as_int(vmIntrinsics::_none)], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}
void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseStringOpts::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    PhaseOutput::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    ConnectionGraph::print_statistics();
    PhaseMacroExpand::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif // PRODUCT
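// Replace all uses of n by nn, keeping the GVN hash table consistent and
// recording every updated user for IGVN.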
void Compile::gvn_replace_by(Node* n, Node* nn) {
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}
// Identify all nodes that are reachable from below, useful.
// Use breadth-first pass that records state in a Unique_Node_List,
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = live_nodes();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if (not_a_node(m))  continue;
      useful.push(m);
    }
  }
}
// Update dead_node_list with any missing dead nodes using useful
// list. Consider all non-useful nodes to be useless i.e., dead nodes.
void Compile::update_dead_node_list(Unique_Node_List &useful) {
  uint max_idx = unique();
  VectorSet& useful_node_set = useful.member_set();

  for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
    // If node with index node_idx is not in useful set,
    // mark it as dead in dead node list.
    if (!useful_node_set.test(node_idx)) {
      record_dead_node(node_idx);
    }
  }
}
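// Compact the late-inline list in place, dropping entries whose call nodes
// are no longer in the useful set.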
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
  int shift = 0;
  for (int i = 0; i < inlines->length(); i++) {
    CallGenerator* cg = inlines->at(i);
    if (useful.member(cg->call_node())) {
      if (shift > 0) {
        inlines->at_put(i - shift, cg);
      }
    } else {
      shift++; // skip over the dead element
    }
  }
  if (shift > 0) {
    inlines->trunc_to(inlines->length() - shift); // remove last elements from compacted array
  }
}
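// Remove the late-inline entry (if any) whose call node is the given dead node.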
void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Node* dead) {
  assert(dead != NULL && dead->is_Call(), "sanity");
  int found = 0;
  for (int i = 0; i < inlines->length(); i++) {
    if (inlines->at(i)->call_node() == dead) {
      inlines->remove_at(i);
      found++;
      NOT_DEBUG( break; ) // elements are unique, so exit early
    }
  }
assert(found <= 1, "not unique");
}
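// Remove entries from node_list that are not in the useful set.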
void Compile::remove_useless_nodes(GrowableArray<Node*>& node_list, Unique_Node_List& useful) {
  for (int i = node_list.length() - 1; i >= 0; i--) {
    Node* n = node_list.at(i);
    if (!useful.member(n)) {
      node_list.delete_at(i); // replaces i-th with last element which is known to be useful (already processed)
    }
  }
}
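// Remove a single dead node from the compile-time bookkeeping lists
// (macro, expensive, skeleton-predicate-opaque, post-loop-opts and late-inline lists).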
void Compile::remove_useless_node(Node* dead) {
  // Constant node that has no out-edges and has only one in-edge from
  // root is usually dead. However, sometimes reshaping walk makes
  // it reachable by adding use edges. So, we will NOT count Con nodes
  // as dead to be conservative about the dead node count at any
  // given time.
  if (!dead->is_Con()) {
    record_dead_node(dead->_idx);
  }
  if (dead->is_macro()) {
    remove_macro_node(dead);
  }
  if (dead->is_expensive()) {
    remove_expensive_node(dead);
  }
  if (dead->Opcode() == Op_Opaque4) {
    remove_skeleton_predicate_opaq(dead);
  }
  if (dead->for_post_loop_opts_igvn()) {
    remove_from_post_loop_opts_igvn(dead);
  }
  if (dead->is_Call()) {
    remove_useless_late_inlines(                &_late_inlines, dead);
    remove_useless_late_inlines(         &_string_late_inlines, dead);
    remove_useless_late_inlines(         &_boxing_late_inlines, dead);
    remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
  }
}
// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_List* worklist) {
  uint next = 0;
  while (next < useful.size()) {
    Node *n = useful.at(next++);
    if (n->is_SafePoint()) {
      // We're done with a parsing phase. Replaced nodes are not valid
      // beyond that point.
      n->as_SafePoint()->delete_replaced_nodes();
    }
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j) {
      Node* child = n->raw_out(j);
      if (!useful.member(child)) {
        assert(!child->is_top() || child != top(),
               "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      worklist->push(n->unique_out());
    }
  }
#ifndef PRODUCT
void Compile::print_ideal_ir(const char* phase_name) {
  ttyLocker ttyl;
  // keep the following output all in one block
  // This output goes directly to the tty, not the compiler log.
  // To enable tools to match it up with the compilation activity,
  // be sure to tag this tty output with the compile ID.
  if (xtty != NULL) {
    xtty->head("ideal compile_id='%d'%s compile_phase='%s'",
               compile_id(),
               is_osr_compilation() ? " compile_kind='osr'" : "",
               phase_name);
  }
  if (_output == nullptr) {
    tty->print_cr("AFTER: %s", phase_name);
    // Print out all nodes in ascending order of index.
    root()->dump_bfs(MaxNodeLimit, nullptr, "+S$");
  } else {
    // Dump the node blockwise if we have a scheduling
    _output->print_scheduling();
  }

  if (xtty != NULL) {
    xtty->tail("ideal");
  }
}
#endif
// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )
// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  bool print_opto_assembly = directive->PrintOptoAssemblyOption;
  // We can always print a disassembly, either abstract (hex dump) or
  // with the help of a suitable hsdis library. Thus, we should not
  // couple print_assembly and print_opto_assembly controls.
  // But: always print opto and regular assembly on compile command 'print'.
  bool print_assembly = directive->PrintAssemblyOption;
  set_print_assembly(print_opto_assembly || print_assembly);
#else
  set_print_assembly(false); // must initialize.
#endif

#ifndef PRODUCT
  if (directive->ReplayInlineOption) {
    _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
  }
#endif
set_print_inlining(directive->PrintInliningOption || PrintOptoInlining);
set_print_intrinsics(directive->PrintIntrinsicsOption);
set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
  if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    // Need MDO to record RTM code generation state.
    method()->ensure_method_data();
  }
Init(/*do_aliasing=*/ true);
print_compile_messages();
_ilt = InlineTree::build_inline_tree_root();
// Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
assert(num_alias_types() >= AliasIdxRaw, "");
#define MINIMUM_NODE_HASH  1023

  // Node list that Iterative GVN will start with
Unique_Node_List for_igvn(comp_arena());
set_for_igvn(&for_igvn);
// GVN that will be run immediately on new nodes
uint estimated_size = method()->code_size()*4+64;
estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
print_inlining_init();
{ // Scope for timing the parser
TracePhase tp("parse", &timers[_t_parser]);
// Put top into the hash table ASAP.
initial_gvn()->transform_no_reclaim(top());
// Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
init_tf(TypeFunc::make(domain, range));
StartNode* s = new StartOSRNode(root(), domain);
initial_gvn()->set_type_bottom(s);
init_start(s);
cg = CallGenerator::for_osr(method(), entry_bci());
} else { // Normal case.
init_tf(TypeFunc::make(method()));
StartNode* s = new StartNode(root(), tf()->domain());
initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
        // With java.lang.ref.reference.get() we must go through the
        // intrinsic - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {
        record_method_not_compilable("method parse failed");
      }
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
}
  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

#ifndef PRODUCT
  if (should_print_igv(1)) {
    _igv_printer->print_inlining();
  }
#endif

  if (failing())  return;
NOT_PRODUCT( verify_graph_edges(); )
  // If any phase is randomized for stress testing, seed random number
  // generation and log the seed for repeatability.
  if (StressLCM || StressGCM || StressIGVN || StressCCP) {
    if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && RepeatCompilation)) {
      _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
      FLAG_SET_ERGO(StressSeed, _stress_seed);
    } else {
      _stress_seed = StressSeed;
    }
    if (_log != NULL) {
      _log->elem("stress_test seed='%u'", _stress_seed);
    }
  }
  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (should_print_ideal()) {
    print_ideal_ir("print_ideal");
  }
#endif

  // Dump compilation data to replay it.
  if (directive->DumpReplayOption) {
    env()->dump_replay_data(_compile_id);
  }
  if (directive->DumpInlineOption && (ilt() != NULL)) {
    env()->dump_inline_data(_compile_id);
  }
  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.
  int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Compute when to use implicit null checks. Used by matching trap based
  // nodes and NullCheck optimization.
  set_allowed_deopt_reasons();
{ // The following is a dummy for the sake of GraphKit::gen_stub
Unique_Node_List for_igvn(comp_arena());
set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this
PhaseGVN gvn(Thread::current()->resource_area(),255);
set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively
gvn.transform_no_reclaim(top());
_node_note_array = NULL;
_default_node_notes = NULL;
DEBUG_ONLY( _modified_nodes = NULL; ) // Used in Optimize()
_immutable_memory = NULL; // filled in at first inquiry
  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new ConNode(Type::TOP) );
set_recent_alloc(NULL, NULL);
// Create Debug Information Recorder to record scopes, oopmaps, etc.
env()->set_oop_recorder(new OopRecorder(env()->arena()));
env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
env()->set_dependencies(new Dependencies(env()));
_fixed_slots = 0;
set_has_split_ifs(false);
set_has_loops(false); // first approximation
set_has_stringbuilder(false);
set_has_boxed_value(false);
_trap_can_recompile = false; // no traps emitted yet
_major_progress = true; // start out assuming good things will happen
set_has_unsafe_access(false);
set_max_vector_size(0);
set_clear_upper_avx(false); //false as default for clear upper bits of ymm registers
Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
set_decompile_count(0);
  if (AllowVectorizeOnDemand) {
    if (has_method() && (_directive->VectorizeOption || _directive->VectorizeDebugOption)) {
      set_do_vector_loop(true);
      NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n",  method()->name()->as_quoted_ascii());})
    } else if (has_method() && method()->name() != 0 &&
               method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
      set_do_vector_loop(true);
    }
  }
  set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider do_vector_loop() mandate use_cmove unconditionally
  NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n",  method()->name()->as_quoted_ascii());})
set_rtm_state(NoRTM); // No RTM lock eliding by default
_max_node_limit = _directive->MaxNodeLimitOption;
#if INCLUDE_RTM_OPT
  if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
    int rtm_state = method()->method_data()->rtm_state();
    if (method_has_option(CompileCommand::NoRTMLockEliding) || ((rtm_state & NoRTM) != 0)) {
      // Don't generate RTM lock eliding code.
      set_rtm_state(NoRTM);
    } else if (method_has_option(CompileCommand::UseRTMLockEliding) || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      set_rtm_state(UseRTM);
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      set_rtm_state(ProfileRTM);
    }
  }
#endif
  if (VM_Version::supports_fast_class_init_checks() && has_method() && !is_osr_compilation() && method()->needs_clinit_barrier()) {
    set_clinit_barrier_on_entry(true);
  }
  if (debug_info()->recording_non_safepoints()) {
set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
(comp_arena(), 8, 0, NULL));
set_default_node_notes(Node_Notes::make(this));
}
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;
//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}
/**
 * Return the 'StartNode'. We must not have a pending failure, since the ideal graph
 * can be in an inconsistent state, i.e., we can get segmentation faults when traversing
 * the ideal graph.
 */
StartNode* Compile::start() const {
  assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if (start->is_Start()) {
      return start->as_Start();
    }
  }
  fatal("Did not find Start node!");
  return NULL;
}
//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}
//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}
#ifdef ASSERT
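// Walk the graph from root to count the live nodes; used to cross-check
// the dead-node bookkeeping (e.g. for VerifyIdealNodeCount).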
uint Compile::count_live_nodes_by_graph_walk() {
  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
  return useful.size();
}
void Compile::print_missing_nodes() {

  // Return if CompileLog is NULL and PrintIdealNodeCount is false.
  if ((_log == NULL) && (! PrintIdealNodeCount)) {
    return;
  }

  // This is an expensive function. It is executed only when the user
  // specifies VerifyIdealNodeCount option or otherwise knows the
  // additional work that needs to be done to identify reachable nodes
  // by walking the flow graph and find the missing ones using
  // _dead_node_list.

  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif
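// Copy debug node notes from source to dest; if dest already has notes,
// merge them so that the pre-existing dest notes win.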
bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}
//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of a range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}
//------------------------------flatten_alias_type-----------------------------
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
  assert(do_aliasing(), "Aliasing should be enabled");
  int offset = tj->offset();
  TypePtr::PTR ptr = tj->ptr();

  // Known instance (scalarizable allocation) alias only with itself.
  bool is_known_inst = tj->isa_oopptr() != NULL &&
                       tj->is_oopptr()->is_known_instance();

  // Process weird unsafe references.
  if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
    assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
    assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
    tj = TypeOopPtr::BOTTOM;
    ptr = tj->ptr();
    offset = tj->offset();
  }
  // Array pointers need some flattening
  const TypeAryPtr* ta = tj->isa_aryptr();
  if (ta && ta->is_stable()) {
    // Erase stability property for alias analysis.
    tj = ta = ta->cast_to_stable(false);
  }
  if( ta && is_known_inst ) {
    if ( offset != Type::OffsetBot &&
offset > arrayOopDesc::length_offset_in_bytes() ) {
offset = Type::OffsetBot; // Flatten constant access into array body only
tj = ta = ta->
remove_speculative()->
cast_to_ptr_type(ptr)->
with_offset(offset);
}
  } else if (ta) {
    // For arrays indexed by constant indices, we flatten the alias
    // space to include all of the array body.  Only the header, klass
    // and array length can be accessed un-aliased.
    if( offset != Type::OffsetBot ) {
      if( ta->const_oop() ) { // MethodData* or Method*
offset = Type::OffsetBot; // Flatten constant access into array body
tj = ta = ta->
remove_speculative()->
cast_to_ptr_type(ptr)->
cast_to_exactness(false)->
with_offset(offset);
      } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
        // range is OK as-is.
tj = ta = TypeAryPtr::RANGE;
      } else if( offset == oopDesc::klass_offset_in_bytes() ) {
tj = TypeInstPtr::KLASS; // all klass loads look alike
ta = TypeAryPtr::RANGE; // generic ignored junk
ptr = TypePtr::BotPTR;
      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
tj = TypeInstPtr::MARK;
ta = TypeAryPtr::RANGE; // generic ignored junk
ptr = TypePtr::BotPTR;
} else { // Random constant offset into array body
offset = Type::OffsetBot; // Flatten constant access into array body
tj = ta = ta->
remove_speculative()->
cast_to_ptr_type(ptr)->
cast_to_exactness(false)->
with_offset(offset);
}
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
tj = ta = ta->
remove_speculative()->
cast_to_ptr_type(ptr)->
with_ary(tary)->
cast_to_exactness(false);
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
tj = ta = ta->
remove_speculative()->
cast_to_ptr_type(TypePtr::BotPTR)->
cast_to_exactness(false)->
with_offset(offset);
}
}
  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
  if (to && to != TypeOopPtr::BOTTOM) {
    ciInstanceKlass* ik = to->instance_klass();
    if( ptr == TypePtr::Constant ) {
      if (ik != ciEnv::current()->Class_klass() ||
          offset < ik->layout_helper_size_in_bytes()) {
        // No constant oop pointers (such as Strings); they alias with
        // unknown strings.
assert(!is_known_inst, "not scalarizable allocation");
tj = to = to->
cast_to_instance_id(TypeOopPtr::InstanceBot)->
remove_speculative()->
cast_to_ptr_type(TypePtr::BotPTR)->
cast_to_exactness(false);
}
    } else if( is_known_inst ) {
tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
tj = to = to->
remove_speculative()->
cast_to_instance_id(TypeOopPtr::InstanceBot)->
cast_to_ptr_type(TypePtr::BotPTR)->
cast_to_exactness(false);
    }
    if (to->speculative() != NULL) {
tj = to = to->remove_speculative();
    }
    // Canonicalize the holder of this field
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
}
    } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
      // Static fields are in the space above the normal instance
      // fields in the java.lang.Class instance.
      if (ik != ciEnv::current()->Class_klass()) {
to = NULL;
tj = TypeOopPtr::BOTTOM;
offset = tj->offset();
}
} else {
ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
      assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
      if (!ik->equals(canonical_holder) || tj->offset() != offset) {
        if( is_known_inst ) {
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
} else {
tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
}
}
}
}
  // Klass pointers to object array klasses need some flattening
  const TypeKlassPtr *tk = tj->isa_klassptr();
  if( tk ) {
    // If we are referencing a field within a Klass, we need
    // to assume the worst case of an Object.  Both exact and
    // inexact types must flatten to the same alias class so
    // use NotNull as the PTR.
    if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
      tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
env()->Object_klass(),
offset);
}
if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
      ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
      if (!k || !k->is_loaded()) {
        // Only fails for some -Xcomp runs
tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
} else {
tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
}
}
    // Check for precise loads from the primary supertype array and force them
    // to the supertype cache alias index.  Check for generic array loads from
    // the primary supertype array and also force them to the supertype cache
    // alias index.  Since the same load can reach both, we need to merge
    // these 2 disparate memories into the same alias class.  Since the
    // primary supertype array is read-only, there's no chance of confusion
    // where we bypass an array load and an array store.
    int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
    if (offset == Type::OffsetBot ||
(offset >= primary_supers_offset &&
offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
offset = in_bytes(Klass::secondary_super_cache_offset());
tj = tk = tk->with_offset(offset);
}
}
  // Flatten all Raw pointers together.
  if (tj->base() == Type::RawPtr)
tj = TypeRawPtr::BOTTOM;
if (tj->base() == Type::AnyPtr)
tj = TypePtr::BOTTOM; // An error, which the caller must check for.
offset = tj->offset();
assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
  int idx = AliasIdxTop;
  for (int i = 0; i < num_alias_types(); i++) {
    if (alias_type(i)->adr_type() == flat) {
      idx = i;
      break;
}
}
  if (idx == AliasIdxTop) {
    if (no_create)  return NULL;
    // Grow the array if necessary.
    if (_num_alias_types == _max_alias_types)  grow_alias_types();
    // Add a new alias type.
idx = _num_alias_types++;
    _alias_types[idx]->Init(idx, flat);
    if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
    if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
    if (flat->isa_instptr()) {
      if (flat->offset() == java_lang_Class::klass_offset()
          && flat->is_instptr()->instance_klass() == env()->Class_klass())
        alias_type(idx)->set_rewritable(false);
    }
    if (flat->isa_aryptr()) {
#ifdef ASSERT
      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      // (T_BYTE has the weakest alignment and size restrictions...)
      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
#endif
      if (flat->offset() == TypePtr::OffsetBot) {
alias_type(idx)->set_element(flat->is_aryptr()->elem());
}
    }
    if (flat->isa_klassptr()) {
      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::access_flags_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
        alias_type(idx)->set_rewritable(false);
      if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
        alias_type(idx)->set_rewritable(false);
    }
    // %%% (We would like to finalize JavaThread::threadObj_offset(),
    // but the base pointer type is not distinctive enough to identify
    // references into JavaThread.)
    // Check for final fields.
    const TypeInstPtr* tinst = flat->isa_instptr();
    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
      ciField* field;
      if (tinst->const_oop() != NULL &&
tinst->instance_klass() == ciEnv::current()->Class_klass() &&
tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) { // static field
ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
field = k->get_field_by_offset(tinst->offset(), true);
} else {
ciInstanceKlass *k = tinst->instance_klass();
field = k->get_field_by_offset(tinst->offset(), false);
}
assert(field == NULL ||
original_field == NULL ||
(field->holder() == original_field->holder() &&
field->offset() == original_field->offset() &&
             field->is_static() == original_field->is_static()), "wrong field?");
      // Set field() and is_rewritable() attributes.
      if (field != NULL)  alias_type(idx)->set_field(field);
}
}
// Fill the cache for next time.
ace->_adr_type = adr_type;
ace->_index = idx;
assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
// Might as well try to fill the cache for the flattened version, too.
  AliasCacheEntry* face = probe_alias_cache(flat);
  if (face->_adr_type == NULL) {
face->_adr_type = flat;
face->_index = idx;
assert(alias_type(flat) == alias_type(idx), "flat type must work too");
}
return alias_type(idx);
}
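// Look up (or create) the alias type for a field, based on the field's holder
// type and offset; static fields live in the holder's java.lang.Class mirror.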
Compile::AliasType* Compile::alias_type(ciField* field) {
  const TypeOopPtr* t;
  if (field->is_static())
    t = TypeInstPtr::make(field->holder()->java_mirror());
  else