/* * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
void PhaseVector::scalarize_vbox_nodes() { if (C->failing()) return;
if (!EnableVectorReboxing) { return; // don't scalarize vector boxes
}
int macro_idx = C->macro_count() - 1; while (macro_idx >= 0) {
Node * n = C->macro_node(macro_idx);
assert(n->is_macro(), "only macro nodes expected here"); if (n->Opcode() == Op_VectorBox) {
VectorBoxNode* vbox = static_cast<VectorBoxNode*>(n);
scalarize_vbox_node(vbox); if (C->failing()) return;
C->print_method(PHASE_SCALARIZE_VBOX, 3, vbox);
} if (C->failing()) return;
macro_idx = MIN2(macro_idx - 1, C->macro_count() - 1);
}
}
void PhaseVector::expand_vbox_nodes() { if (C->failing()) return;
int macro_idx = C->macro_count() - 1; while (macro_idx >= 0) {
Node * n = C->macro_node(macro_idx);
assert(n->is_macro(), "only macro nodes expected here"); if (n->Opcode() == Op_VectorBox) {
VectorBoxNode* vbox = static_cast<VectorBoxNode*>(n);
expand_vbox_node(vbox); if (C->failing()) return;
} if (C->failing()) return;
macro_idx = MIN2(macro_idx - 1, C->macro_count() - 1);
}
}
void PhaseVector::expand_vunbox_nodes() { if (C->failing()) return;
int macro_idx = C->macro_count() - 1; while (macro_idx >= 0) {
Node * n = C->macro_node(macro_idx);
assert(n->is_macro(), "only macro nodes expected here"); if (n->Opcode() == Op_VectorUnbox) {
VectorUnboxNode* vec_unbox = static_cast<VectorUnboxNode*>(n);
expand_vunbox_node(vec_unbox); if (C->failing()) return;
C->print_method(PHASE_EXPAND_VUNBOX, 3, vec_unbox);
} if (C->failing()) return;
macro_idx = MIN2(macro_idx - 1, C->macro_count() - 1);
}
}
void PhaseVector::eliminate_vbox_alloc_nodes() { if (C->failing()) return;
int macro_idx = C->macro_count() - 1; while (macro_idx >= 0) {
Node * n = C->macro_node(macro_idx);
assert(n->is_macro(), "only macro nodes expected here"); if (n->Opcode() == Op_VectorBoxAllocate) {
VectorBoxAllocateNode* vbox_alloc = static_cast<VectorBoxAllocateNode*>(n);
eliminate_vbox_alloc_node(vbox_alloc); if (C->failing()) return;
C->print_method(PHASE_ELIMINATE_VBOX_ALLOC, 3, vbox_alloc);
} if (C->failing()) return;
macro_idx = MIN2(macro_idx - 1, C->macro_count() - 1);
}
}
// Makes a shallow clone of the JVM state attached to 'sfpt' and hangs it off a
// fresh SafePointNode whose inputs mirror those of 'sfpt'. Ensures the new
// map's memory input is a MergeMemNode, as GraphKit requires.
static JVMState* clone_jvms(Compile* C, SafePointNode* sfpt) {
  JVMState* new_jvms = sfpt->jvms()->clone_shallow(C);
  uint req_cnt = sfpt->req();
  SafePointNode* map = new SafePointNode(req_cnt, new_jvms);
  for (uint idx = 0; idx < req_cnt; idx++) {
    map->init_req(idx, sfpt->in(idx));
  }
  Node* mem = map->memory();
  if (!mem->is_MergeMem()) {
    // Since we are not in parsing, the SafePointNode does not guarantee that
    // the memory input is necessarily a MergeMemNode. But we need to ensure
    // that there is that MergeMemNode, since the GraphKit assumes the memory
    // input of the map to be a MergeMemNode, so that it can directly access
    // the memory slices.
    PhaseGVN& gvn = *C->initial_gvn();
    Node* mergemem = MergeMemNode::make(mem);
    gvn.set_type_bottom(mergemem);
    map->set_memory(mergemem);
  }
  new_jvms->set_map(map);
  return new_jvms;
}
// NOTE(review): mid-function fragment — the enclosing function header is not
// visible in this chunk (presumably the tail of scalarize_vbox_node — TODO
// confirm). Code left byte-identical; comments and line breaks only.
// Adjust JVMS from post-call to pre-call state: put args on stack
uint nargs = call->method()->arg_size();
kit.ensure_stack(kit.sp() + nargs);
// Re-push the call's arguments so the debug info reflects the pre-call stack.
for (uint i = TypeFunc::Parms; i < call->tf()->domain()->cnt(); i++) {
  kit.push(call->in(i));
}
jvms = kit.sync_jvms();
// If a mask is feeding into safepoint[s], then its value should be
// packed into a boolean/byte vector first, this will simplify the
// re-materialization logic for both predicated and non-predicated
// targets.
bool is_mask = is_vector_mask(iklass);
if (is_mask && vec_value->Opcode() != Op_VectorStoreMask) {
  const TypeVect* vt = vec_value->bottom_type()->is_vect();
  BasicType bt = vt->element_basic_type();
  vec_value = gvn.transform(VectorStoreMaskNode::make(gvn, vec_value, bt, vt->length()));
}
// Rewrite the debug info of every collected safepoint so it refers to the
// scalarized object instead of the boxed vector.
while (safepoints.size() > 0) {
  SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
  jvms->set_endoff(sfpt->req());
  // Now make a pass over the debug information replacing any references
  // to the allocated object with vector value.
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != NULL && debug->uncast(/*keep_deps*/false) == vec_box) {
      sfpt->set_req(i, sobj);
    }
  }
  C->record_for_igvn(sfpt);
}
}
// Expands a (possibly phi-merged) vector box. 'vbox' is the allocation input
// of a VectorBoxNode and 'vect' the vector value being boxed; 'box_type' and
// 'vect_type' describe the box instance and the vector payload. Returns the
// node representing the expanded box.
//
// Fix: the original used 'elseif', which is not a C++ keyword (C++ spells the
// chained form 'else if'), so the function did not compile.
Node* PhaseVector::expand_vbox_node_helper(Node* vbox,
                                           Node* vect,
                                           const TypeInstPtr* box_type,
                                           const TypeVect* vect_type) {
  if (vbox->is_Phi() && vect->is_Phi()) {
    // Both inputs are phis: expand pairwise per predecessor and merge the
    // results with a new phi on the same region.
    assert(vbox->as_Phi()->region() == vect->as_Phi()->region(), "");
    Node* new_phi = new PhiNode(vbox->as_Phi()->region(), box_type);
    for (uint i = 1; i < vbox->req(); i++) {
      Node* new_box = expand_vbox_node_helper(vbox->in(i), vect->in(i), box_type, vect_type);
      new_phi->set_req(i, new_box);
    }
    new_phi = C->initial_gvn()->transform(new_phi);
    return new_phi;
  } else if (vbox->is_Phi() && (vect->is_Vector() || vect->is_LoadVector())) {
    // Handle the case when the allocation input to VectorBoxNode is a phi
    // but the vector input is not, which can definitely be the case if the
    // vector input has been value-numbered. It seems to be safe to do by
    // construction because VectorBoxNode and VectorBoxAllocate come in a
    // specific order as a result of expanding an intrinsic call. After that, if
    // any of the inputs to VectorBoxNode are value-numbered they can only
    // move up and are guaranteed to dominate.
    Node* new_phi = new PhiNode(vbox->as_Phi()->region(), box_type);
    for (uint i = 1; i < vbox->req(); i++) {
      Node* new_box = expand_vbox_node_helper(vbox->in(i), vect, box_type, vect_type);
      new_phi->set_req(i, new_box);
    }
    new_phi = C->initial_gvn()->transform(new_phi);
    return new_phi;
  } else if (vbox->is_Proj() && vbox->in(0)->Opcode() == Op_VectorBoxAllocate) {
    // Direct projection off the allocation: expand the allocation itself.
    VectorBoxAllocateNode* vbox_alloc = static_cast<VectorBoxAllocateNode*>(vbox->in(0));
    return expand_vbox_alloc_node(vbox_alloc, vect, box_type, vect_type);
  } else {
    assert(!vbox->is_Phi(), "");
    // TODO: assert that expanded vbox is initialized with the same value (vect).
    return vbox; // already expanded
  }
}
// NOTE(review): mid-function fragment — the enclosing function header is not
// visible in this chunk (presumably the interior of expand_vbox_alloc_node —
// TODO confirm). Code left byte-identical; comments and line breaks only.
bool is_mask = is_vector_mask(box_klass);
// If boxed mask value is present in a predicate register, it must be
// spilled to a vector though a VectorStoreMaskOperation before actual StoreVector
// operation to vector payload field.
if (is_mask && (value->bottom_type()->isa_vectmask() || bt != T_BOOLEAN)) {
  value = gvn.transform(VectorStoreMaskNode::make(gvn, value, bt, num_elem));
  // Although type of mask depends on its definition, in terms of storage
  // everything is stored in boolean array.
  bt = T_BOOLEAN;
  assert(value->bottom_type()->is_vect()->element_basic_type() == bt,
         "must be consistent with mask representation");
}

// Generate array allocation for the field which holds the values.
const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(bt));
Node* arr = kit.new_array(kit.makecon(array_klass), kit.intcon(num_elem), 1);

// Store the vector value into the array.
// (The store should be captured by InitializeNode and turned into initialized store later.)
Node* arr_adr = kit.array_element_address(arr, kit.intcon(0), bt);
const TypePtr* arr_adr_type = arr_adr->bottom_type()->is_ptr();
Node* arr_mem = kit.memory(arr_adr);
Node* vstore = gvn.transform(StoreVectorNode::make(0,
                                                   kit.control(),
                                                   arr_mem,
                                                   arr_adr,
                                                   arr_adr_type,
                                                   value,
                                                   num_elem));
kit.set_memory(vstore, arr_adr_type);

// Generate the allocate for the Vector object.
const TypeKlassPtr* klass_type = box_type->as_klass_type();
Node* klass_node = kit.makecon(klass_type);
Node* vec_obj = kit.new_instance(klass_node);

// Store the allocated array into object.
ciField* field = ciEnv::current()->vector_VectorPayload_klass()->get_field_by_name(ciSymbols::payload_name(),
                                                                                   ciSymbols::object_signature(),
                                                                                   false);
assert(field != NULL, "");
Node* vec_field = kit.basic_plus_adr(vec_obj, field->offset_in_bytes());
const TypePtr* vec_adr_type = vec_field->bottom_type()->is_ptr();
// The store should be captured by InitializeNode and turned into initialized store later.
Node* field_store = gvn.transform(kit.access_store_at(vec_obj,
                                                      vec_field,
                                                      vec_adr_type,
                                                      arr,
                                                      TypeOopPtr::make_from_klass(field->type()->as_klass()),
                                                      T_OBJECT,
                                                      IN_HEAP));
kit.set_memory(field_store, vec_adr_type);
// Removes a single VectorBoxAllocate node from the graph and from the macro
// node list, replacing the call with its own (safepointed) map.
void PhaseVector::eliminate_vbox_alloc_node(VectorBoxAllocateNode* vbox_alloc) {
  JVMState* cloned_jvms = clone_jvms(C, vbox_alloc);
  GraphKit kit(cloned_jvms);
  // Remove VBA, but leave a safepoint behind.
  // Otherwise, it may end up with a loop without any safepoint polls.
  kit.replace_call(vbox_alloc, kit.map(), true);
  C->remove_macro_node(vbox_alloc);
}
// NOTE(review): the following German web-page disclaimer is extraneous to this
// C++ source file (it appears to be extraction residue) and is preserved here
// as a comment only:
// "Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
// zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch
// Qualität der bereitgestellten Informationen zugesichert.
// Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
// experimentell."