/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
*/
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
ciField* field = iter().get_field(will_link);
assert(will_link, "getfield: typeflow responsibility");
ciInstanceKlass* field_holder = field->holder();
  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }
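
  // Illustration (not in the original source): this trap covers bytecode
  // whose staticness no longer matches the resolved field, e.g. a client
  // compiled against
  //
  //   class Holder { int x; }          // client emits getfield Holder.x
  //
  // running against a newer Holder where x became static.  The JVMS
  // requires IncompatibleClassChangeError here, so defer to the interpreter.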
  // Deoptimize on putfield writes to call site target field outside of CallSite ctor.
  if (!is_get && field->is_call_site_target() &&
      !(method()->holder() == field_holder && method()->is_object_initializer())) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }
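
  // Illustration (not in the original source): the guarded field is
  // java.lang.invoke.CallSite.target.  Compiled code may have inlined
  // through a constant call site target, so a direct putfield to it is
  // only tolerated inside the CallSite constructor; other writers (e.g.
  // MutableCallSite.setTarget) must take runtime paths that can invalidate
  // dependent compiled code.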
if (C->needs_clinit_barrier(field, method())) {
    clinit_barrier(field_holder, method());
    if (stopped())  return;
}
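
  // A minimal sketch of the remainder of do_field_access, reconstructed
  // from context; not verbatim.  Compute the object pointer (receiver for
  // instance fields, java mirror for statics) and dispatch.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    if (stopped())  return;  // compile-time detected null receiver
    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}

//=============================================================================
// A minimal sketch of the do_get_xxx preamble, reconstructed from how its
// locals are used below; not verbatim.
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  BasicType bt = field->layout_type();   // basic type of the field value
  ciType* field_klass = field->type();   // declared type of the field
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load.
  const Type* type;
  bool must_assert_null = false;

  // Memory-ordering decorators: volatile loads are sequentially consistent.
  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = (bt == T_OBJECT || bt == T_ARRAY);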
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant() &&
      // Keep consistent with types found by ciTypeFlow: for an
      // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
      // speculates the field is null. The code in the rest of this
      // method does the same. We must not bypass it and use a non
      // null constant here.
      (bt != T_OBJECT || field->type()->is_loaded())) {
    // final or stable field
    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }
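
  // Illustration (not in the original source): a getstatic of
  //
  //   static final int SIZE = 64;
  //
  // is folded here to a ConI(64) and pushed directly; no load is emitted.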
  if (is_obj) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
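
  // A minimal sketch of the load itself, reconstructed from context; not
  // verbatim.  It sits between the type computation above and the
  // must_assert_null handling below.
  Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);

  // Adjust Java stack: longs and doubles occupy two slots.
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);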
  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name();
      tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}
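
//=============================================================================
// A minimal sketch of the do_put_xxx preamble, reconstructed from how its
// locals are used below; not verbatim.
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  BasicType bt = field->layout_type();
  bool is_obj = (bt == T_OBJECT || bt == T_ARRAY);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);

  // Value to be stored is on top of the stack.
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  // Memory-ordering decorators: volatile stores are sequentially consistent.
  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;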
  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (is_obj) {
field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
} else {
field_type = Type::BOTTOM;
}
}
access_store_at(obj, adr, adr_type, val, field_type, bt, decorators);
  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multiple-copy-atomic (e.g. ppc64), a barrier
    // must be issued in constructors containing such stores.
    // See do_exits() in parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve allocation ptr to create precedent edge to it in membar
        // generated on exit from constructor.
        // Can't bind stable with its allocation, only record allocation for final field.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}
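
// Illustration (not in the original source): the final-field bookkeeping
// above is what makes safe publication work, e.g. for
//
//   class Point {
//     final int x;
//     Point(int x) { this.x = x; }   // putfield marks wrote_final
//   }
//
// do_exits() in parse1.cpp emits a MemBarRelease when the constructor
// returns, so the store to x cannot float past publication of the Point.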
//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
ciKlass* klass = iter().get_klass(will_link);
  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
assert(will_link, "anewarray: typeflow responsibility");
  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }
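
  // A minimal sketch of the tail of do_anewarray, reconstructed from
  // context; not verbatim.  Pop the length, allocate, push the result.
  kill_dead_locals();
  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}

//=============================================================================
// A minimal sketch of the do_multianewarray preamble, reconstructed from
// context; not verbatim.  (The recursive helper expand_multianewarray,
// used below, allocates each constant dimension with the fast 1-d
// allocator and fills in the sub-arrays.)
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");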
// Note: Array classes are always initialized; no is_initialized check.
kill_dead_locals();
  // get the lengths from the stack (last dimension is on top)
Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();
  // The original expression was of this form:  new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const int expand_limit = MIN2((int)MultiArrayExpandLimit, 100);
  int expand_count = 1;        // count of allocations in the expansion
  int expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    int dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
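
  // Worked example (not in the original source): for new T[2][n], only
  // j == 0 is examined: fanout = 2, count = 1 + 2 = 3, within the default
  // MultiArrayExpandLimit of 6, so the allocation is expanded inline.
  // For new T[3][4][n]: j == 0 gives count = 4; j == 1 gives fanout = 12
  // and count = 16 > 6, so expand_count is reset and the code below falls
  // back to a runtime call.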
  // Expand the multianewarray into a series of [a]newarray allocations
  // if there is only one dimension, or if all non-final dimensions are
  // small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }
  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
};
Node* c = NULL;
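
  // A minimal sketch of the elided runtime call, reconstructed from the
  // OptoRuntime entry points chosen above; not verbatim.  For 2..5
  // dimensions, call the fixed-arity stub; with more dimensions, fun stays
  // NULL and the original code instead packs the lengths into an int[]
  // and calls OptoRuntime::multianewarrayN_Java().
  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 4) ? length[4] : NULL);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);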
Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));
const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);
// Improve the type: We know it's not null, exact, and of a given length.
type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
type = type->is_aryptr()->cast_to_exactness(true);
  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
type = type->is_aryptr()->cast_to_size(ltype);
// We cannot sharpen the nested sub-arrays, since the top level is mutable.
Node* cast = _gvn.transform( new CheckCastPPNode(control(), res, type) );
push(cast);
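
  // Illustration (not in the original source): for new String[10][n], res
  // starts as a plain java/lang/String[][] oop; the casts above sharpen it
  // to an exact, not-null array type whose outer length is known to be 10.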
  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}