/*
 * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// NOTE(review): this span appears to be a corrupted splice of several
// ArrayCopyNode methods (the is_clonebasic() instance-count logic, parts of
// the instance-clone expansion, and the array path of prepare_array_copy).
// Statements from the different methods are interleaved, and several locals
// referenced below (inst_src in the else branch, in_mem, count, base_src,
// base_dest, ary_dest, src_offset, dest_offset, elem, adr_src, adr_dest,
// copy_type, value_type, disjoint_bases) are declared in text that is missing
// from this chunk -- recover it from upstream before building.
// The repair below only restores line structure: code that had been swallowed
// by line-collapsed '//' comments is made code again, and keywords fused by
// lost whitespace ("returnfalse", "returntrue") are separated. No statements
// were added, removed, or reordered.
if (is_clonebasic()) {
  if (src_type->isa_instptr()) {
    const TypeInstPtr* inst_src = src_type->is_instptr();
    ciInstanceKlass* ik = inst_src->instance_klass();
    // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
    // fields into account. They are rare anyway so easier to simply
    // skip instances with injected fields.
    if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
      return -1;
    }
    int nb_fields = ik->nof_nonstatic_fields();
    return nb_fields;
  } else {
    const TypeAryPtr* ary_src = src_type->isa_aryptr();
    assert (ary_src != NULL, "not an array or instance?");
    // clone passes a length as a rounded number of longs. If we're
    // cloning an array we'll do it element by element. If the
    // length input to ArrayCopyNode is constant, length of input
    // array must be too.
    MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem();
    if (can_reshape) {
      phase->is_IterGVN()->_worklist.push(mem);
    }
    // NOTE(review): 'inst_src' is not in scope on this (array) branch -- a
    // sign that instance-clone code from another method was spliced in here.
    ciInstanceKlass* ik = inst_src->instance_klass();
    if (!inst_src->klass_is_exact()) {
      assert(!ik->is_interface(), "inconsistent klass hierarchy");
      if (ik->has_subklass()) {
        // Concurrent class loading.
        // Fail fast and return NodeSentinel to indicate that the transform failed.
        return NodeSentinel;
      } else {
        phase->C->dependencies()->assert_leaf_type(ik);
      }
    }
    assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    // Emit one load/store pair per non-static field of the cloned instance.
    for (int i = 0; i < count; i++) {
      ciField* field = ik->nonstatic_field_at(i);
      const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
      Node* off = phase->MakeConX(field->offset());
      Node* next_src = phase->transform(new AddPNode(base_src,base_src,off));
      Node* next_dest = phase->transform(new AddPNode(base_dest,base_dest,off));
      BasicType bt = field->layout_type();
      const Type *type;
      if (bt == T_OBJECT) {
        if (!field->type()->is_loaded()) {
          type = TypeInstPtr::BOTTOM;
        } else {
          ciType* field_klass = field->type();
          type = TypeOopPtr::make_from_klass(field_klass->as_klass());
        }
      } else {
        type = Type::get_const_basic_type(bt);
      }
      // newly allocated object is guaranteed to not overlap with source object
      disjoint_bases = is_alloc_tightly_coupled();
      if (ary_src == NULL || ary_src->elem() == Type::BOTTOM ||
          ary_dest == NULL || ary_dest->elem() == Type::BOTTOM) {
        // We don't know if arguments are arrays
        return false;
      }
      BasicType src_elem = ary_src->elem()->array_element_basic_type();
      BasicType dest_elem = ary_dest->elem()->array_element_basic_type();
      if (is_reference_type(src_elem, true)) src_elem = T_OBJECT;
      if (is_reference_type(dest_elem, true)) dest_elem = T_OBJECT;
      if (src_elem != dest_elem || dest_elem == T_VOID) {
        // We don't know if arguments are arrays of the same type
        return false;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, false, BarrierSetC2::Optimization)) {
        // It's an object array copy but we can't emit the card marking
        // that is needed
        return false;
      }
      src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
      if (src_offset->is_top()) {
        // Offset is out of bounds (the ArrayCopyNode will be removed)
        return false;
      }
      dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
      if (dest_offset->is_top()) {
        // Offset is out of bounds (the ArrayCopyNode will be removed)
        if (can_reshape) {
          // record src_offset, so it can be deleted later (if it is dead)
          phase->is_IterGVN()->_worklist.push(src_offset);
        }
        return false;
      }
      // The address is offsetted to an aligned address where a raw copy would start.
      // If the clone copy is decomposed into load-stores - the address is adjusted to
      // point at where the array starts.
      const Type* toff = phase->type(src_offset);
      int offset = toff->isa_long() ? (int) toff->is_long()->get_con() : (int) toff->is_int()->get_con();
      int diff = arrayOopDesc::base_offset_in_bytes(elem) - offset;
      assert(diff >= 0, "clone should not start after 1st array element");
      if (diff > 0) {
        adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
        adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
      }
      copy_type = elem;
      value_type = ary_src->elem();
    }
    return true;
  }
// Compute the address (alias) type to use for this copy's memory accesses.
// If the incoming atp is the uninformative TypeOopPtr::BOTTOM, re-derive a
// pointer type from node n; the offset is then widened to OffsetBot so the
// slice covers every element offset in the array.
// FIX(review): in the damaged source the 'return' statement had been swallowed
// by a line-collapsed '//' comment, leaving the function without a return; the
// original statement is restored below.
const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN* phase, const TypePtr* atp, Node* n) {
  if (atp == TypeOopPtr::BOTTOM) {
    atp = phase->type(n)->isa_ptr();
  }
  // adjust atp to be the correct array element address type
  return atp->add_offset(Type::OffsetBot);
}
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, is_clone_inst(), BarrierSetC2::Optimization), "can only happen with card marking"); returnfalse;
}
igvn->replace_node(out_mem->raw_out(0), mem);
Node* out_ctl = proj_out(TypeFunc::Control);
igvn->replace_node(out_ctl, ctl);
} else { // replace fallthrough projections of the ArrayCopyNode by the // new memory, control and the input IO.
CallProjections callprojs;
extract_projections(&callprojs, true, false);
if (callprojs.fallthrough_ioproj != NULL) {
igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
} if (callprojs.fallthrough_memproj != NULL) {
igvn->replace_node(callprojs.fallthrough_memproj, mem);
} if (callprojs.fallthrough_catchproj != NULL) {
igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
}
// The ArrayCopyNode is not disconnected. It still has the // projections for the exception case. Replace current // ArrayCopyNode with a dummy new one with a top() control so // that this part of the graph stays consistent but is // eventually removed.
set_req(0, phase->C->top());
remove_dead_region(phase, can_reshape);
}
} else { if (in(TypeFunc::Control) != ctl) { // we can't return new memory and control from Ideal at parse time
assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
phase->record_for_igvn(this); returnfalse;
}
} returntrue;
}
// Ideal transform for ArrayCopyNode: try to expand a small array copy / clone
// into explicit loads and stores, returning the resulting memory state, or
// NULL when no transform applies.
// NOTE(review): several declarations this body relies on (adr_src, base_src,
// adr_dest, base_dest, copy_type, value_type, disjoint_bases, forward_ctl,
// backward_ctl, forward_mem, backward_mem, mem) are in text missing from this
// chunk -- recover it from upstream. Only whitespace-level damage is repaired
// here (fused "returnthis" and "elseif" keywords); no statements were added,
// removed, or reordered.
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;
  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return NULL;
  }
  // See if it's a small array copy and we can inline it as
  // loads/stores
  // Here we can only do:
  // - arraycopy if all arguments were validated before and we don't
  // need card marking
  // - clone for which we don't need to do card marking
  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    assert(adr_src == NULL, "no node can be left behind");
    assert(adr_dest == NULL, "no node can be left behind");
    return NULL;
  }
  Node* ctl = NULL;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    // Both copy directions were emitted: merge their control and memory
    // states with a Region and per-slice memory Phis.
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }
  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }
  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    if (can_reshape) {
      // put in worklist, so that if it happens to be dead it is removed
      phase->is_IterGVN()->_worklist.push(mem);
    }
    return NULL;
  }
  return mem;
}
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
Node* dest = in(ArrayCopyNode::Dest); if (dest->is_top()) { returnfalse;
} const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
_src_type->is_known_instance(), "result of EA not recorded");
if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance"); return t_oop->instance_id() == _dest_type->instance_id();
}
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); // step over g1 gc barrier if we're at e.g. a clone with ReduceInitialCardMarks off
c = bs->step_over_gc_barrier(c);
CallNode* call = NULL;
guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to."); if (c->is_Region()) { for (uint i = 1; i < c->req(); i++) { if (c->in(i) != NULL) {
Node* n = c->in(i)->in(0); if (may_modify_helper(t_oop, n, phase, call)) {
ac = call->isa_ArrayCopy();
assert(c == mb->in(0), "only for clone"); returntrue;
}
}
}
} elseif (may_modify_helper(t_oop, c->in(0), phase, call)) {
ac = call->isa_ArrayCopy(); #ifdef ASSERT bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) && static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone"); #endif returntrue;
} elseif (mb->trailing_partial_array_copy()) { returntrue;
}
returnfalse;
}
// NOTE(review): the line below was collapsed during extraction -- the
// signature of ArrayCopyNode::modifies is trapped behind the '//' comment
// markers, so the function header is currently commented out, and its body is
// truncated here (it breaks off after the 'len' declaration; the remainder of
// the file is unrelated web-page text). Recover the full function from
// upstream before building; no code is altered in this annotation pass.
// Does this array copy modify offsets between offset_lo and offset_hi // in the destination array // if must_modify is false, return true if the copy could write // between offset_lo and offset_hi // if must_modify is true, return true if the copy is guaranteed to // write between offset_lo and offset_hi bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");
Node* dest = in(Dest);
Node* dest_pos = in(DestPos);
Node* len = in(Length);
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten
Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.