/* * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): this excerpt begins mid-definition — the enclosing function
// signatures are not visible in this copy of the file.  The lines up to the
// VLENGTH assert validate a vector box klass via its static ETYPE/VLENGTH
// fields; the lines after appear to be the tail of a rotate/shift support
// helper (they read opc/num_elem/elem_bt/has_scalar_args/is_supported, none
// of which are declared here).  Restore from the upstream source.
ciInstanceKlass* ik = vbox_type->instance_klass();
assert(is_vector(ik), "not a vector");
// ETYPE is a static Class-typed field of the vector class; its constant
// value is the element-type mirror.
ciField* fd1 = ik->get_field_by_name(ciSymbols::ETYPE_name(), ciSymbols::class_signature(), /* is_static */ true);
assert(fd1 != NULL, "element type info is missing");
ciConstant val1 = fd1->constant_value();
BasicType elem_bt = val1.as_object()->as_instance()->java_mirror_type()->basic_type();
assert(is_java_primitive(elem_bt), "element type info is missing");
// VLENGTH is a static int field holding the lane count; it must be positive.
ciField* fd2 = ik->get_field_by_name(ciSymbols::VLENGTH_name(), ciSymbols::int_signature(), /* is_static */ true);
assert(fd2 != NULL, "vector length info is missing");
ciConstant val2 = fd2->constant_value();
assert(val2.as_int() > 0, "vector length info is missing");
// NOTE(review): on the next line, extraction fused executable code into the
// trailing comment — everything after the "//" (including the
// "if (!Matcher::match_rule_supported_vector(...)" test) is dead text here
// and must be un-fused to compile.
// has_scalar_args flag is true only for non-constant scalar shift count, // since in this case shift needs to be broadcasted. if (!Matcher::match_rule_supported_vector(opc, num_elem, elem_bt) ||
(has_scalar_args &&
!arch_supports_vector(VectorNode::replicate_opcode(elem_bt), num_elem, elem_bt, VecMaskNotUsed))) {
is_supported = false;
}
// NOTE(review): same extraction damage below — code fused after "//" and a
// "#ifndef PRODUCT" fused mid-line; "returnfalse" is a lost space for
// "return false;".
if (is_supported) { // Check if mask unboxing is supported, this is a two step process which first loads the contents // of boolean array into vector followed by either lane expansion to match the lane size of masked // vector operation or populate the predicate register. if ((mask_use_type & VecMaskUseLoad) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, elem_bt) ||
!Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it",
NodeClassNames[Op_VectorLoadMask], type2name(elem_bt), num_elem);
} #endif returnfalse;
}
}
if ((mask_use_type & VecMaskUsePred) != 0) { if (!Matcher::has_predicated_vectors() ||
!Matcher::match_rule_supported_vector_masked(opc, num_elem, elem_bt)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it",
NodeClassNames[opc], type2name(elem_bt), num_elem);
} #endif returnfalse;
}
}
}
// Builds a vector shift-count node for "shift_op" on "num_elem" lanes of
// element type "bt".  The scalar count "cnt" is first masked down to the
// significant shift-count bits of the element type (count & (bits - 1)),
// then handed to VectorNode::shift_count which produces the broadcast
// shift-count node for the target.
Node* GraphKit::vector_shift_count(Node* cnt, int shift_op, BasicType bt, int num_elem) {
  assert(bt == T_INT || bt == T_LONG || bt == T_SHORT || bt == T_BYTE, "byte, short, long and int are supported");
  // Mask selecting only the meaningful low bits of the shift count for "bt".
  juint count_mask = (type2aelembytes(bt) * BitsPerByte - 1);
  Node* mask_node = gvn().transform(ConNode::make(TypeInt::make(count_mask)));
  Node* masked_cnt = gvn().transform(new AndINode(cnt, mask_node));
  return gvn().transform(VectorNode::shift_count(shift_op, masked_cnt, num_elem, bt));
}
bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type, VectorMaskUseType mask_use_type, bool has_scalar_args) { // Check that the operation is valid. if (sopc <= 0) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected intrinsification because no valid vector op could be extracted");
} #endif returnfalse;
}
if (VectorNode::is_vector_rotate(sopc)) { if(!arch_supports_vector_rotate(sopc, num_elem, type, mask_use_type, has_scalar_args)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
}
} elseif (VectorNode::is_vector_integral_negate(sopc)) { if (!VectorNode::is_vector_integral_negate_supported(sopc, num_elem, type, false)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support integral vector negate",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
}
} else { // Check that architecture supports this op-size-type combination. if (!Matcher::match_rule_supported_vector(sopc, num_elem, type)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support it",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
} else {
assert(Matcher::match_rule_supported(sopc), "must be supported");
}
}
if (num_elem == 1) { if (mask_use_type != VecMaskNotUsed) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector mask op (%s,%s,%d) because architecture does not support it",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
}
if (sopc != 0) { if (sopc != Op_LoadVector && sopc != Op_StoreVector) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Not a svml call or load/store vector op (%s,%s,%d)",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
}
}
}
if (!has_scalar_args && VectorNode::is_vector_shift(sopc) &&
Matcher::supports_vector_variable_shifts() == false) { if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector op (%s,%s,%d) because architecture does not support variable vector shifts",
NodeClassNames[sopc], type2name(type), num_elem);
} returnfalse;
}
// Check if mask unboxing is supported, this is a two step process which first loads the contents // of boolean array into vector followed by either lane expansion to match the lane size of masked // vector operation or populate the predicate register. if ((mask_use_type & VecMaskUseLoad) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorLoadMask, num_elem, type) ||
!Matcher::match_rule_supported_vector(Op_LoadVector, num_elem, T_BOOLEAN)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr(" ** Rejected vector mask loading (%s,%s,%d) because architecture does not support it",
NodeClassNames[Op_VectorLoadMask], type2name(type), num_elem);
} #endif returnfalse;
}
}
// Check if mask boxing is supported, this is a two step process which first stores the contents // of mask vector / predicate register into a boolean vector followed by vector store operation to // transfer the contents to underlined storage of mask boxes which is a boolean array. if ((mask_use_type & VecMaskUseStore) != 0) { if (!Matcher::match_rule_supported_vector(Op_VectorStoreMask, num_elem, type) ||
!Matcher::match_rule_supported_vector(Op_StoreVector, num_elem, T_BOOLEAN)) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr("Rejected vector mask storing (%s,%s,%d) because architecture does not support it",
NodeClassNames[Op_VectorStoreMask], type2name(type), num_elem);
} #endif returnfalse;
}
}
if (!is_supported) { #ifndef PRODUCT if (C->print_intrinsics()) {
tty->print_cr("Rejected vector mask predicate using (%s,%s,%d) because architecture does not support it",
NodeClassNames[sopc], type2name(type), num_elem);
} #endif returnfalse;
}
}
returntrue;
}
staticbool is_klass_initialized(const TypeInstPtr* vec_klass) { if (vec_klass->const_oop() == NULL) { returnfalse; // uninitialized or some kind of unsafe access
}
assert(vec_klass->const_oop()->as_instance()->java_lang_Class_klass() != NULL, "klass instance expected");
ciInstanceKlass* klass = vec_klass->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); return klass->is_initialized();
}
// NOTE(review): extraction damage — the next physical line fuses the Java
// API comment, the signature of
// LibraryCallKit::inline_vector_nary_operation(int n), and the extraction of
// the first five intrinsic arguments into a single "//" line, so everything
// on it is currently dead comment text.  Line breaks must be restored from
// the upstream source before this can compile.  Also note the missing-
// constant check on opr/vector_klass/elem_klass/vlen that upstream performs
// first appears to have been lost here.
// public static // <V extends Vector<E>, // M extends VectorMask<E>, // E> // V unaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType, // int length, V v, M m, // UnaryOperation<V, M> defaultImpl) // // public static // <V, // M extends VectorMask<E>, // E> // V binaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType, // int length, V v1, V v2, M m, // BinaryOperation<V, M> defaultImpl) // // public static // <V extends Vector<E>, // M extends VectorMask<E>, // E> // V ternaryOp(int oprId, Class<? extends V> vmClass, Class<? extends M> maskClass, Class<E> elementType, // int length, V v1, V v2, V v3, M m, // TernaryOperation<V, M> defaultImpl) // bool LibraryCallKit::inline_vector_nary_operation(int n) { const TypeInt* opr = gvn().type(argument(0))->isa_int(); const TypeInstPtr* vector_klass = gvn().type(argument(1))->isa_instptr(); const TypeInstPtr* mask_klass = gvn().type(argument(2))->isa_instptr(); const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int();
// Bail out unless the element type is a constant primitive mirror and the
// vector class is loaded & initialized.  ("returnfalse" below is extraction
// damage for "return false;".)
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type());
} returnfalse; // should be primitive type
} if (!is_klass_initialized(vector_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** klass argument not initialized");
} returnfalse;
}
// "argument(n + 5)" should be the mask object. We assume it is "null" when no mask // is used to control this operation. const Type* vmask_type = gvn().type(argument(n + 5)); bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == NULL || mask_klass->const_oop() == NULL) { if (C->print_intrinsics()) {
tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]);
} returnfalse; // not enough info for intrinsification
}
if (!is_klass_initialized(mask_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** mask klass argument not initialized");
} returnfalse;
}
if (vmask_type->maybe_null()) { if (C->print_intrinsics()) {
tty->print_cr(" ** null mask values are not allowed for masked op");
} returnfalse;
}
}
// Map the Vector API operation id to the C2 ideal opcode (opc) and the
// vector-specialized opcode (sopc); sopc == 0 is only tolerated for SVML
// leaf calls (Op_CallLeafVector).
BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con(); int opc = VectorSupport::vop2ideal(opr->get_con(), elem_bt); int sopc = VectorNode::opcode(opc, elem_bt); if ((opc != Op_CallLeafVector) && (sopc == 0)) { if (C->print_intrinsics()) {
tty->print_cr(" ** operation not supported: opc=%s bt=%s", NodeClassNames[opc], type2name(elem_bt));
} returnfalse; // operation not supported
} if (num_elem == 1) { if (opc != Op_CallLeafVector || elem_bt != T_DOUBLE) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a svml call: arity=%d opc=%d vlen=%d etype=%s",
n, opc, num_elem, type2name(elem_bt));
} returnfalse;
}
}
ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
if (is_vector_mask(vbox_klass)) {
assert(!is_masked_op, "mask operations do not need mask to control");
}
// SVML leaf calls additionally require vector stubs, a vector calling
// convention and backend support for this vector size.
if (opc == Op_CallLeafVector) { if (!UseVectorStubs) { if (C->print_intrinsics()) {
tty->print_cr(" ** vector stubs support is disabled");
} returnfalse;
} if (!Matcher::supports_vector_calling_convention()) { if (C->print_intrinsics()) {
tty->print_cr(" ** no vector calling conventions supported");
} returnfalse;
} if (!Matcher::vector_size_supported(elem_bt, num_elem)) { if (C->print_intrinsics()) {
tty->print_cr(" ** vector size (vlen=%d, etype=%s) is not supported",
num_elem, type2name(elem_bt));
} returnfalse;
}
}
// When using mask, mask use type needs to be VecMaskUseLoad.
VectorMaskUseType mask_use_type = is_vector_mask(vbox_klass) ? VecMaskUseAll
: is_masked_op ? VecMaskUseLoad : VecMaskNotUsed; if ((sopc != 0) && !arch_supports_vector(sopc, num_elem, elem_bt, mask_use_type)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=%d is_masked_op=%d",
n, sopc, num_elem, type2name(elem_bt),
is_vector_mask(vbox_klass) ? 1 : 0, is_masked_op ? 1 : 0);
} returnfalse; // not supported
}
// Return true if current platform has implemented the masked operation with predicate feature. bool use_predicate = is_masked_op && sopc != 0 && arch_supports_vector(sopc, num_elem, elem_bt, VecMaskUsePred); if (is_masked_op && !use_predicate && !arch_supports_vector(Op_VectorBlend, num_elem, elem_bt, VecMaskUseLoad)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d opc=%d vlen=%d etype=%s ismask=0 is_masked_op=1",
n, sopc, num_elem, type2name(elem_bt));
} returnfalse;
}
// NOTE(review): the code that unboxes the operand vectors (defining "opd1"),
// unboxes the mask (defining "mask") and constructs the operation node
// (defining "operation") is missing from this copy of the file — the
// variables used below are otherwise undeclared.  Restore from upstream.
if (is_masked_op && mask != NULL) { if (use_predicate) {
// Predicated execution: attach the mask as an extra input.
operation->add_req(mask);
operation->add_flag(Node::Flag_is_predicated_vector);
} else {
// No predicate support: emulate masking by blending the unmasked result
// with the first operand under the mask.
operation->add_flag(Node::Flag_is_predicated_using_blend);
operation = gvn().transform(operation);
operation = new VectorBlendNode(opd1, operation, mask);
}
}
operation = gvn().transform(operation);
// Wrap it up in VectorBox to keep object type information.
Node* vbox = box_vector(operation, vbox_type, elem_bt, num_elem);
set_result(vbox);
C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt)))); returntrue;
}
// NOTE(review): extraction damage — the next physical line fuses the Java
// API comment, the signature of LibraryCallKit::inline_vector_shuffle_iota()
// and its argument extraction into one "//" line (all currently dead text).
// The body between argument extraction and the wrap-index handling below
// (which defines "res", "bcast_mod", "bcast_lane_cnt", "mask", "num_elem",
// "elem_bt" and "shuffle_box_type") is missing from this copy of the file.
// <Sh extends VectorShuffle<E>, E> // Sh ShuffleIota(Class<?> E, Class<?> shuffleClass, Vector.Species<E> s, int length, // int start, int step, int wrap, ShuffleIotaOperation<Sh, E> defaultImpl) bool LibraryCallKit::inline_vector_shuffle_iota() { const TypeInstPtr* shuffle_klass = gvn().type(argument(1))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(3))->isa_int(); const TypeInt* start_val = gvn().type(argument(4))->isa_int(); const TypeInt* step_val = gvn().type(argument(5))->isa_int(); const TypeInt* wrap = gvn().type(argument(6))->isa_int();
// Make the indices greater than lane count as -ve values. This matches the java side implementation.
res = gvn().transform(VectorNode::make(Op_AndI, res, bcast_mod, num_elem, elem_bt));
Node * biased_val = gvn().transform(VectorNode::make(Op_SubI, res, bcast_lane_cnt, num_elem, elem_bt));
res = gvn().transform(new VectorBlendNode(biased_val, res, mask));
}
// Wrap it up in VectorBox to keep object type information.
res = box_vector(res, shuffle_box_type, elem_bt, num_elem);
set_result(res);
// "returntrue" is extraction damage for "return true;".
C->set_max_vector_size(MAX2(C->max_vector_size(), (uint)(num_elem * type2aelembytes(elem_bt)))); returntrue;
}
// NOTE(review): orphaned fragment — the enclosing function's signature is
// not visible (a shuffle-to-vector conversion, judging by the cast from
// T_BYTE shuffle lanes to the target element type).  "num_elem", "elem_bt",
// "shuffle" and "shuffle_box_type" are defined in missing code.
int cast_vopc = VectorCastNode::opcode(-1, T_BYTE); // from shuffle of type T_BYTE // Make sure that cast is implemented to particular type/size combination. if (!arch_supports_vector(cast_vopc, num_elem, elem_bt, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=1 op=cast#%d/3 vlen2=%d etype2=%s",
cast_vopc, num_elem, type2name(elem_bt));
} returnfalse;
}
// Unbox shuffle with true flag to indicate its load shuffle to vector // shuffle is a byte array
Node* shuffle_vec = unbox_vector(shuffle, shuffle_box_type, T_BYTE, num_elem, true);
// cast byte to target element type
shuffle_vec = gvn().transform(VectorCastNode::make(cast_vopc, shuffle_vec, elem_bt, num_elem));
// NOTE(review): orphaned fragment — interior of another intrinsic whose
// signature is missing from this copy; it performs the standard
// klass-initialized / primitive-element validation and derives the exact
// vector-box type.  "vector_klass", "elem_klass" and "vlen" come from code
// that is not visible here.
if (!is_klass_initialized(vector_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** klass argument not initialized");
} returnfalse;
}
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type());
} returnfalse; // should be primitive type
}
BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con();
ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass);
// NOTE(review): the next line is the collapsed Java API comment for the
// load/store intrinsic; the signature of the enclosing function (upstream:
// LibraryCallKit::inline_vector_mem_operation(bool is_store)) and its
// argument extraction are missing from this copy.  "arr_type", "is_mask",
// "old_map" and "old_sp" used below are defined in that missing code.
// public static // <C, // VM, // E, // S extends VectorSpecies<E>> // VM load(Class<? extends VM> vmClass, Class<E> elementType, int length, // Object base, long offset, // Unsafe addressing // C container, long index, S s, // Arguments for default implementation // LoadOperation<C, VM, E, S> defaultImpl) // // public static // <C, // V extends Vector<?>> // void store(Class<?> vectorClass, Class<?> elementType, int length, // Object base, long offset, // Unsafe addressing // V v, // C container, long index, // Arguments for default implementation // StoreVectorOperation<C, V> defaultImpl)
if (vector_klass == NULL || elem_klass == NULL || vlen == NULL ||
vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { if (C->print_intrinsics()) {
tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],
NodeClassNames[argument(2)->Opcode()]);
} returnfalse; // not enough info for intrinsification
} if (!is_klass_initialized(vector_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** klass argument not initialized");
} returnfalse;
}
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type());
} returnfalse; // should be primitive type
}
BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con();
// Plain (unmasked) load/store support check.
// TODO When mask usage is supported, VecMaskNotUsed needs to be VecMaskUseLoad. if (!arch_supports_vector(is_store ? Op_StoreVector : Op_LoadVector, num_elem, elem_bt, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s ismask=no",
is_store, is_store ? "store" : "load",
num_elem, type2name(elem_bt));
} returnfalse; // not supported
}
// Now handle special case where load/store happens from/to byte array but element type is not byte. bool using_byte_array = arr_type != NULL && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; // Handle loading masks. // If there is no consistency between array and vector element types, it must be special byte array case or loading masks if (arr_type != NULL && !using_byte_array && !is_mask && !elem_consistent_with_arr(elem_bt, arr_type)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no",
is_store, is_store ? "store" : "load",
num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type()));
}
// Restore the saved JVM state before bailing out of the intrinsic.
set_map(old_map);
set_sp(old_sp); returnfalse;
} // Since we are using byte array, we need to double check that the byte operations are supported by backend. if (using_byte_array) { int byte_num_elem = num_elem * type2aelembytes(elem_bt); if (!arch_supports_vector(is_store ? Op_StoreVector : Op_LoadVector, byte_num_elem, T_BYTE, VecMaskNotUsed)
|| !arch_supports_vector(Op_VectorReinterpret, byte_num_elem, T_BYTE, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d*8 etype=%s/8 ismask=no",
is_store, is_store ? "store" : "load",
byte_num_elem, type2name(elem_bt));
}
set_map(old_map);
set_sp(old_sp); returnfalse; // not supported
}
// Mask load/store additionally needs mask unbox (load) / box (store)
// support from the backend.
} if (is_mask) { if (!is_store) { if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) {
set_map(old_map);
set_sp(old_sp); returnfalse; // not supported
}
} else { if (!arch_supports_vector(Op_StoreVector, num_elem, elem_bt, VecMaskUseStore)) {
set_map(old_map);
set_sp(old_sp); returnfalse; // not supported
}
}
}
// NOTE(review): extraction damage — the next physical line fuses the Java
// API comment, the signature of
// LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) and its
// argument extraction into one "//" line (all currently dead text).  Several
// interior spans are also missing ("arr_type", "supports_predicate",
// "mem_num_elem" and "mem_elem_bt" below are defined in lost code), and the
// function continues past the end of this excerpt.
// public static // <C, // V extends Vector<?>, // E, // S extends VectorSpecies<E>, // M extends VectorMask<E>> // V loadMasked(Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType, // int length, Object base, long offset, M m, int offsetInRange, // C container, long index, S s, // Arguments for default implementation // LoadVectorMaskedOperation<C, V, S, M> defaultImpl) { // // public static // <C, // V extends Vector<E>, // M extends VectorMask<E>, // E> // void storeMasked(Class<? extends V> vectorClass, Class<M> maskClass, Class<E> elementType, // int length, Object base, long offset, // V v, M m, // C container, long index, // Arguments for default implementation // StoreVectorMaskedOperation<C, V, M, E> defaultImpl) { // bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { const TypeInstPtr* vector_klass = gvn().type(argument(0))->isa_instptr(); const TypeInstPtr* mask_klass = gvn().type(argument(1))->isa_instptr(); const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(3))->isa_int();
// All four type arguments must be constants before we can intrinsify.
if (vector_klass == NULL || mask_klass == NULL || elem_klass == NULL || vlen == NULL ||
vector_klass->const_oop() == NULL || mask_klass->const_oop() == NULL ||
elem_klass->const_oop() == NULL || !vlen->is_con()) { if (C->print_intrinsics()) {
tty->print_cr(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],
NodeClassNames[argument(2)->Opcode()],
NodeClassNames[argument(3)->Opcode()]);
} returnfalse; // not enough info for intrinsification
} if (!is_klass_initialized(vector_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** klass argument not initialized");
} returnfalse;
}
if (!is_klass_initialized(mask_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** mask klass argument not initialized");
} returnfalse;
}
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type());
} returnfalse; // should be primitive type
}
BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con();
// Unsafe-style addressing: object base plus long offset.
Node* base = argument(4);
Node* offset = ConvL2X(argument(5));
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
// NOTE(review): "arr_type" is defined in code missing from this excerpt.
// Now handle special case where load/store happens from/to byte array but element type is not byte. bool using_byte_array = arr_type != NULL && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; // If there is no consistency between array and vector element types, it must be special byte array case if (arr_type != NULL && !using_byte_array && !elem_consistent_with_arr(elem_bt, arr_type)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s",
is_store, is_store ? "storeMasked" : "loadMasked",
num_elem, type2name(elem_bt), type2name(arr_type->elem()->array_element_basic_type()));
}
set_map(old_map);
set_sp(old_sp); returnfalse;
}
// NOTE(review): "supports_predicate" is computed in code missing from this
// excerpt (presumably an arch_supports_vector(..., VecMaskUsePred) query —
// confirm against upstream).
// If current arch does not support the predicated operations, we have to bail // out when current case uses the predicate feature. if (!supports_predicate) { bool needs_predicate = false; if (is_store) { // Masked vector store always uses the predicated store.
needs_predicate = true;
} else { // Masked vector load with IOOBE always uses the predicated load. const TypeInt* offset_in_range = gvn().type(argument(8))->isa_int(); if (!offset_in_range->is_con()) { if (C->print_intrinsics()) {
tty->print_cr(" ** missing constant: offsetInRange=%s",
NodeClassNames[argument(8)->Opcode()]);
}
set_map(old_map);
set_sp(old_sp); returnfalse;
}
// offsetInRange == 0 means the access may be partially out of bounds, so
// the predicated load is required.
needs_predicate = (offset_in_range->get_con() == 0);
}
// NOTE(review): "mem_num_elem"/"mem_elem_bt" are defined in missing code.
// This only happens for masked vector load. If predicate is not supported, then check whether // the normal vector load and blend operations are supported by backend. if (!supports_predicate && (!arch_supports_vector(Op_LoadVector, mem_num_elem, mem_elem_bt, VecMaskNotUsed) ||
!arch_supports_vector(Op_VectorBlend, mem_num_elem, mem_elem_bt, VecMaskUseLoad))) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: op=loadMasked vlen=%d etype=%s using_byte_array=%d",
num_elem, type2name(elem_bt), using_byte_array ? 1 : 0);
}
set_map(old_map);
set_sp(old_sp); returnfalse;
}
// Since we are using byte array, we need to double check that the vector reinterpret operation // with byte type is supported by backend. if (using_byte_array) { if (!arch_supports_vector(Op_VectorReinterpret, mem_num_elem, T_BYTE, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s using_byte_array=1",
is_store, is_store ? "storeMasked" : "loadMasked",
num_elem, type2name(elem_bt));
}
set_map(old_map);
set_sp(old_sp); returnfalse;
}
}
// Since it needs to unbox the mask, we need to double check that the related load operations // for mask are supported by backend. if (!arch_supports_vector(Op_LoadVector, num_elem, elem_bt, VecMaskUseLoad)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s",
is_store, is_store ? "storeMasked" : "loadMasked",
num_elem, type2name(elem_bt));
}
set_map(old_map);
set_sp(old_sp); returnfalse;
}
// Can base be NULL? Otherwise, always on-heap access. bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(gvn().type(base)); if (can_access_non_heap) {
insert_mem_bar(Op_MemBarCPUOrder);
}
// NOTE(review): orphaned fragment — interior of the gather/scatter intrinsic
// (upstream: LibraryCallKit::inline_vector_gather_scatter(bool is_scatter));
// its signature and argument extraction are missing from this copy, and the
// function continues past the end of this excerpt.  "vector_klass",
// "vector_idx_klass", "mask_klass", "elem_klass", "vlen" and "is_scatter"
// come from the missing code.
if (!is_klass_initialized(vector_klass) || !is_klass_initialized(vector_idx_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** klass argument not initialized");
} returnfalse;
}
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type(); if (!elem_type->is_primitive_type()) { if (C->print_intrinsics()) {
tty->print_cr(" ** not a primitive bt=%d", elem_type->basic_type());
} returnfalse; // should be primitive type
}
BasicType elem_bt = elem_type->basic_type(); int num_elem = vlen->get_con();
// The mask argument sits at a different position for scatter vs gather;
// TypePtr::NULL_PTR means the operation is unmasked.
const Type* vmask_type = gvn().type(is_scatter ? argument(10) : argument(9)); bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { if (mask_klass == NULL || mask_klass->const_oop() == NULL) { if (C->print_intrinsics()) {
tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(1)->Opcode()]);
} returnfalse; // not enough info for intrinsification
}
if (!is_klass_initialized(mask_klass)) { if (C->print_intrinsics()) {
tty->print_cr(" ** mask klass argument not initialized");
} returnfalse;
}
if (vmask_type->maybe_null()) { if (C->print_intrinsics()) {
tty->print_cr(" ** null mask values are not allowed for masked op");
} returnfalse;
}
// Check whether the predicated gather/scatter node is supported by architecture. if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatterMasked : Op_LoadVectorGatherMasked, num_elem, elem_bt,
(VectorMaskUseType) (VecMaskUseLoad | VecMaskUsePred))) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=1",
is_scatter, is_scatter ? "scatterMasked" : "gatherMasked",
num_elem, type2name(elem_bt));
} returnfalse; // not supported
}
} else { // Check whether the normal gather/scatter node is supported for non-masked operation. if (!arch_supports_vector(is_scatter ? Op_StoreVectorScatter : Op_LoadVectorGather, num_elem, elem_bt, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s is_masked_op=0",
is_scatter, is_scatter ? "scatter" : "gather",
num_elem, type2name(elem_bt));
} returnfalse; // not supported
}
}
// Check that the vector holding indices is supported by architecture if (!arch_supports_vector(Op_LoadVector, num_elem, T_INT, VecMaskNotUsed)) { if (C->print_intrinsics()) {
tty->print_cr(" ** not supported: arity=%d op=%s/loadindex vlen=%d etype=int is_masked_op=%d",
is_scatter, is_scatter ? "scatter" : "gather",
num_elem, is_masked_op ? 1 : 0);
} returnfalse; // not supported
}
// Unsafe-style addressing: object base plus long offset.
Node* base = argument(5);
Node* offset = ConvL2X(argument(6));
// Save state and restore on bailout
uint old_sp = sp();
SafePointNode* old_map = clone_map();
// NOTE(review): the following German disclaimer text is residue from the
// web page this file was extracted from; it is not part of the source and
// should be removed.  Preserved verbatim below, commented out so it cannot
// be mistaken for code (English translation: "The information on this web
// page was compiled carefully to the best of our knowledge.  However,
// neither completeness, correctness, nor quality of the provided
// information is guaranteed.  Note: the syntax colouring and the
// measurement are still experimental."):
// Die Informationen auf dieser Webseite wurden
// nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereit gestellten Informationen zugesichert.
// Bemerkung:
// Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.