//
// Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2022, Arm Limited. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// This file is automatically generated by running "m4 aarch64_vector_ad.m4". Do not edit!
// AArch64 VECTOR Architecture Description File
// 4 bit signed offset -- for predicated load/store
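// Illustrative note: the displacement must be a multiple of the SVE register
// size and fit in a signed 4-bit multiplier, i.e. disp == imm4 * VL with imm4
// in [-8, 7]. For a 256-bit (32-byte) vector this allows -256, -224, ..., 224.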
operand vmemA_immIOffset4() %{
// (esize / msize) = 1
predicate(Address::offset_ok_for_sve_immed(n->get_int(), 4,
Matcher::scalable_vector_reg_size(T_BYTE)));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand vmemA_immLOffset4() %{
// (esize / msize) = 1
predicate(Address::offset_ok_for_sve_immed(n->get_long(), 4,
Matcher::scalable_vector_reg_size(T_BYTE)));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand vmemA_indOffI4(iRegP reg, vmemA_immIOffset4 off) %{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand vmemA_indOffL4(iRegP reg, vmemA_immLOffset4 off) %{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
// The indOff of vmemA is valid only when the size of the vector element (loaded
// to/stored from) equals the size of the memory element (loaded from/stored to).
opclass vmemA(indirect, vmemA_indOffI4, vmemA_indOffL4);
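// For example, loading ints from an int[] into an int vector has
// esize == msize == 4, so the scaled-offset forms above apply; a widening
// load (e.g. bytes into short lanes) would have esize != msize and may only
// use the plain indirect address.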
source_hpp %{
// Assert that the given node is not a variable shift.
bool assert_not_var_shift(const Node* n);
Assembler::SIMD_Arrangement get_arrangement(const Node* n);
%}
source %{
typedef void (C2_MacroAssembler::* sve_mem_insn_predicate)(FloatRegister Rt, Assembler::SIMD_RegVariant T,
PRegister Pg, const Address &adr);
// Predicated load/store; the given predicate register may be ptrue, which
// selects all elements.
static void loadStoreA_predicated(C2_MacroAssembler masm, bool is_store, FloatRegister reg,
PRegister pg, BasicType mem_elem_bt, BasicType vector_elem_bt,
int opcode, Register base, int index, int size, int disp) {
sve_mem_insn_predicate insn;
int mesize = type2aelembytes(mem_elem_bt);
if (index == -1) {
assert(size == 0, "unsupported address mode: scale size = %d", size);
switch(mesize) {
case 1:
insn = is_store ? &C2_MacroAssembler::sve_st1b : &C2_MacroAssembler::sve_ld1b;
break;
case 2:
insn = is_store ? &C2_MacroAssembler::sve_st1h : &C2_MacroAssembler::sve_ld1h;
break;
case 4:
insn = is_store ? &C2_MacroAssembler::sve_st1w : &C2_MacroAssembler::sve_ld1w;
break;
case 8:
insn = is_store ? &C2_MacroAssembler::sve_st1d : &C2_MacroAssembler::sve_ld1d;
break;
default:
assert(false, "unsupported");
ShouldNotReachHere();
}
int imm4 = disp / mesize / Matcher::scalable_vector_reg_size(vector_elem_bt);
(masm.*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
} else {
assert(false, "unimplemented");
ShouldNotReachHere();
}
}
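// Worked example (assuming a 512-bit SVE implementation, i.e. 64-byte
// registers): a byte store with disp == 128 gives mesize == 1 and
// imm4 == 128 / 1 / 64 == 2, encoded as [base, #2, MUL VL].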
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
if (UseSVE == 0) {
// These operations are not profitable to vectorize on NEON because no NEON
// instructions support them directly. But keeping match rule support for them
// is profitable for Vector API intrinsics.
if ((opcode == Op_VectorCastD2X && bt == T_INT) ||
(opcode == Op_VectorCastL2X && bt == T_FLOAT) ||
(opcode == Op_CountLeadingZerosV && bt == T_LONG) ||
(opcode == Op_CountTrailingZerosV && bt == T_LONG) ||
opcode == Op_AddReductionVD || opcode == Op_AddReductionVF ||
opcode == Op_MulReductionVD || opcode == Op_MulReductionVF ||
opcode == Op_MulVL) {
return false;
}
}
return match_rule_supported_vector(opcode, vlen, bt);
}
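// For instance, with UseSVE == 0 the auto-vectorizer will not generate
// Op_MulVL here, yet Vector API intrinsics can still reach the vmulL_neon
// rule below via match_rule_supported_vector().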
// Identify extra cases in which we might want to provide match rules for vector
// nodes and other intrinsics, guarded by vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode)) {
return false;
}
int length_in_bytes = vlen * type2aelembytes(bt);
if (UseSVE == 0 && length_in_bytes > 16) {
return false;
}
// Check whether specific Op is supported.
// Fail fast, otherwise fall through to common vector_size_supported() check.
switch (opcode) {
case Op_AndVMask:
case Op_OrVMask:
case Op_XorVMask:
case Op_MaskAll:
case Op_VectorMaskGen:
case Op_LoadVectorMasked:
case Op_StoreVectorMasked:
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_LoadVectorGatherMasked:
case Op_StoreVectorScatterMasked:
case Op_PopulateIndex:
case Op_CompressM:
case Op_CompressV:
if (UseSVE == 0) {
return false;
}
break;
case Op_MulAddVS2VI:
if (length_in_bytes != 16) {
return false;
}
break;
case Op_MulReductionVD:
case Op_MulReductionVF:
case Op_MulReductionVI:
case Op_MulReductionVL:
// There are no vector multiply reduction instructions, but we do
// emit scalar instructions for 64/128-bit vectors.
if (length_in_bytes != 8 && length_in_bytes != 16) {
return false;
}
break;
case Op_VectorMaskCmp:
if (length_in_bytes < 8) {
return false;
}
break;
case Op_VectorLoadShuffle:
case Op_VectorRearrange:
if (vlen < 4) {
return false;
}
break;
case Op_ExpandV:
if (UseSVE < 2 || is_subword_type(bt)) {
return false;
}
break;
case Op_VectorMaskToLong:
if (UseSVE > 0 && vlen > 64) {
return false;
}
break;
case Op_VectorLongToMask:
if (UseSVE < 2 || vlen > 64 || !VM_Version::supports_svebitperm()) {
return false;
}
break;
default:
break;
}
return vector_size_supported(bt, vlen);
}
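// For example, Op_MulAddVS2VI is accepted only for full 128-bit vectors, so a
// 64-bit vector request fails fast in the switch above.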
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
// Only SVE supports masked operations.
if (UseSVE == 0) {
return false;
}
// If an opcode does not support the masked version, an unpredicated node
// combined with a VectorBlend node will be used instead.
switch(opcode) {
case Op_VectorRearrange:
case Op_MulReductionVD:
case Op_MulReductionVF:
case Op_MulReductionVI:
case Op_MulReductionVL:
return false;
// We use Op_LoadVectorMasked to implement the predicated Op_LoadVector.
// Hence we check whether Op_LoadVectorMasked is supported instead. The same
// applies to vector store/gather/scatter.
case Op_LoadVector:
opcode = Op_LoadVectorMasked;
break;
case Op_StoreVector:
opcode = Op_StoreVectorMasked;
break;
case Op_LoadVectorGather:
opcode = Op_LoadVectorGatherMasked;
break;
case Op_StoreVectorScatter:
opcode = Op_StoreVectorScatterMasked;
break;
default:
break;
}
return match_rule_supported_vector(opcode, vlen, bt);
}
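// For example, a masked MulReductionVI is rejected above, so C2 falls back to
// an unpredicated reduction over a VectorBlend of the inputs.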
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
// Only SVE has partial vector operations.
if (UseSVE == 0) {
return false;
}
switch(node->Opcode()) {
case Op_VectorLoadMask:
case Op_VectorMaskCmp:
case Op_LoadVectorGather:
case Op_StoreVectorScatter:
case Op_AddReductionVF:
case Op_AddReductionVD:
case Op_AndReductionV:
case Op_OrReductionV:
case Op_XorReductionV:
// Mask is needed for partial Op_VectorMaskFirstTrue, because when the
// input predicate is all-false, the result should be the vector length
// instead of the vector register size.
case Op_VectorMaskFirstTrue:
return true;
case Op_MaskAll:
return !node->in(1)->is_Con();
case Op_LoadVector:
case Op_StoreVector:
// We use NEON load/store instructions if the vector length is <= 128 bits.
return vt->length_in_bytes() > 16;
case Op_AddReductionVI:
case Op_AddReductionVL:
// We may prefer using NEON instructions rather than SVE partial operations.
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
case Op_MinReductionV:
case Op_MaxReductionV:
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
// instructions rather than SVE partial operations.
return vt->element_basic_type() == T_LONG ||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
default:
// For other ops whose vector size is smaller than the max vector size, a
// full-sized unpredicated operation does not impact the final vector result.
return false;
}
}
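// For example, on a machine with MaxVectorSize == 64, a 16-byte LoadVector is
// served by the NEON loadV16 rule and needs no governing predicate, while a
// partial 32-byte load must be predicated.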
// Assert that the given node is not a variable shift.
bool assert_not_var_shift(const Node* n) {
assert(!n->as_ShiftV()->is_var_shift(), "illegal variable shift");
return true;
}
Assembler::SIMD_Arrangement get_arrangement(const Node* n) {
BasicType bt = Matcher::vector_element_basic_type(n);
uint length_in_bytes = Matcher::vector_length_in_bytes(n);
return Assembler::esize2arrangement((uint)type2aelembytes(bt),
/* isQ */ length_in_bytes == 16);
}
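// For example, 4 ints in a 128-bit register map to T4S, while 4 shorts in a
// 64-bit register map to T4H.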
%}
// All VECTOR instructions
// ------------------------------ Vector load/store ----------------------------
// Load Vector (16 bits)
instruct loadV2(vReg dst, vmem2 mem) %{
predicate(n->as_LoadVector()->memory_size() == 2);
match(Set dst (LoadVector mem));
format %{ "loadV2 $dst, $mem\t# vector (16 bits)" %}
ins_encode( aarch64_enc_ldrvH(dst, mem) );
ins_pipe(pipe_slow);
%}
// Store Vector (16 bits)
instruct storeV2(vReg src, vmem2 mem) %{
predicate(n->as_StoreVector()->memory_size() == 2);
match(Set mem (StoreVector mem src));
format %{ "storeV2 $mem, $src\t# vector (16 bits)" %}
ins_encode( aarch64_enc_strvH(src, mem) );
ins_pipe(pipe_slow);
%}
// Load Vector (32 bits)
instruct loadV4(vReg dst, vmem4 mem) %{
predicate(n->as_LoadVector()->memory_size() == 4);
match(Set dst (LoadVector mem));
format %{ "loadV4 $dst, $mem\t# vector (32 bits)" %}
ins_encode( aarch64_enc_ldrvS(dst, mem) );
ins_pipe(pipe_slow);
%}
// Store Vector (32 bits)
instruct storeV4(vReg src, vmem4 mem) %{
predicate(n->as_StoreVector()->memory_size() == 4);
match(Set mem (StoreVector mem src));
format %{ "storeV4 $mem, $src\t# vector (32 bits)" %}
ins_encode( aarch64_enc_strvS(src, mem) );
ins_pipe(pipe_slow);
%}
// Load Vector (64 bits)
instruct loadV8(vReg dst, vmem8 mem) %{
predicate(n->as_LoadVector()->memory_size() == 8);
match(Set dst (LoadVector mem));
format %{ "loadV8 $dst, $mem\t# vector (64 bits)" %}
ins_encode( aarch64_enc_ldrvD(dst, mem) );
ins_pipe(pipe_slow);
%}
// Store Vector (64 bits)
instruct storeV8(vReg src, vmem8 mem) %{
predicate(n->as_StoreVector()->memory_size() == 8);
match(Set mem (StoreVector mem src));
format %{ "storeV8 $mem, $src\t# vector (64 bits)" %}
ins_encode( aarch64_enc_strvD(src, mem) );
ins_pipe(pipe_slow);
%}
// Load Vector (128 bits)
instruct loadV16(vReg dst, vmem16 mem) %{
predicate(n->as_LoadVector()->memory_size() == 16);
match(Set dst (LoadVector mem));
format %{ "loadV16 $dst, $mem\t# vector (128 bits)" %}
ins_encode( aarch64_enc_ldrvQ(dst, mem) );
ins_pipe(pipe_slow);
%}
// Store Vector (128 bits)
instruct storeV16(vReg src, vmem16 mem) %{
predicate(n->as_StoreVector()->memory_size() == 16);
match(Set mem (StoreVector mem src));
format %{ "storeV16 $mem, $src\t# vector (128 bits)" %}
ins_encode( aarch64_enc_strvQ(src, mem) );
ins_pipe(pipe_slow);
%}
// Load Vector (> 128 bits)
instruct loadV(vReg dst, vmemA mem) %{
predicate(n->as_LoadVector()->memory_size() > 16);
match(Set dst (LoadVector mem));
format %{ "loadV $dst, $mem\t# vector (sve)" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false,
$dst$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
ins_pipe(pipe_slow);
%}
// Store Vector (> 128 bits)
instruct storeV(vReg src, vmemA mem) %{
predicate(n->as_StoreVector()->memory_size() > 16);
match(Set mem (StoreVector mem src));
format %{ "storeV $mem, $src\t# vector (sve)" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this, $src);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true,
$src$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
ins_pipe(pipe_slow);
%}
// vector load/store - predicated
instruct loadV_masked(vReg dst, vmemA mem, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst (LoadVectorMasked mem pg));
format %{ "loadV_masked $dst, $pg, $mem" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false, $dst$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
ins_pipe(pipe_slow);
%}
instruct storeV_masked(vReg src, vmemA mem, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set mem (StoreVectorMasked mem (Binary src pg)));
format %{ "storeV_masked $mem, $pg, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true, $src$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
ins_pipe(pipe_slow);
%}
// vector load const
instruct vloadcon(vReg dst, immI0 src) %{
match(Set dst (VectorLoadConst src));
format %{ "vloadcon $dst, $src\t# load/generate iota indices" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
if (UseSVE == 0) {
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes <= 16, "must be");
// The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 16.
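// e.g. for T_INT the offset is exact_log2(4) << 4 == 32, and the
// floating-point adjustment below turns T_FLOAT into 32 + 32 == 64.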
int offset = exact_log2(type2aelembytes(bt)) << 4;
if (is_floating_point_type(bt)) {
offset += 32;
}
__ lea(rscratch1, ExternalAddress(StubRoutines::aarch64::vector_iota_indices() + offset));
if (length_in_bytes == 16) {
__ ldrq($dst$$FloatRegister, rscratch1);
} else {
__ ldrd($dst$$FloatRegister, rscratch1);
}
} else {
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_index($dst$$FloatRegister, size, 0, 1);
if (is_floating_point_type(bt)) {
__ sve_scvtf($dst$$FloatRegister, size, ptrue, $dst$$FloatRegister, size);
}
}
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector add -----------------------------------
// vector add
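// Each unpredicated rule below dispatches on the actual vector length: NEON
// instructions for lengths of up to 128 bits, SVE instructions (governed by
// ptrue) otherwise.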
instruct vaddB(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVB src1 src2));
format %{ "vaddB $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ addv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_add($dst$$FloatRegister, __ B, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddS(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVS src1 src2));
format %{ "vaddS $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ addv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_add($dst$$FloatRegister, __ H, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddI(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVI src1 src2));
format %{ "vaddI $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ addv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_add($dst$$FloatRegister, __ S, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddL(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVL src1 src2));
format %{ "vaddL $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ addv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_add($dst$$FloatRegister, __ D, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddF(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVF src1 src2));
format %{ "vaddF $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fadd($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fadd($dst$$FloatRegister, __ S, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddD(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AddVD src1 src2));
format %{ "vaddD $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fadd($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fadd($dst$$FloatRegister, __ D, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector add - predicated
instruct vaddB_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVB (Binary dst_src1 src2) pg));
format %{ "vaddB_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_add($dst_src1$$FloatRegister, __ B, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddS_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVS (Binary dst_src1 src2) pg));
format %{ "vaddS_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_add($dst_src1$$FloatRegister, __ H, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddI_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVI (Binary dst_src1 src2) pg));
format %{ "vaddI_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_add($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddL_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVL (Binary dst_src1 src2) pg));
format %{ "vaddL_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_add($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddF_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVF (Binary dst_src1 src2) pg));
format %{ "vaddF_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fadd($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vaddD_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AddVD (Binary dst_src1 src2) pg));
format %{ "vaddD_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fadd($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector add reg imm (unpredicated)
instruct vaddImmB(vReg dst_src, immBAddSubV con) %{
predicate(UseSVE > 0);
match(Set dst_src (AddVB dst_src (ReplicateB con)));
format %{ "vaddImmB $dst_src, $dst_src, $con" %}
ins_encode %{
int val = (int)$con$$constant;
if (val > 0) {
__ sve_add($dst_src$$FloatRegister, __ B, val);
} else {
__ sve_sub($dst_src$$FloatRegister, __ B, -val);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddImmS(vReg dst_src, immIAddSubV con) %{
predicate(UseSVE > 0);
match(Set dst_src (AddVS dst_src (ReplicateS con)));
format %{ "vaddImmS $dst_src, $dst_src, $con" %}
ins_encode %{
int val = (int)$con$$constant;
if (val > 0) {
__ sve_add($dst_src$$FloatRegister, __ H, val);
} else {
__ sve_sub($dst_src$$FloatRegister, __ H, -val);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddImmI(vReg dst_src, immIAddSubV con) %{
predicate(UseSVE > 0);
match(Set dst_src (AddVI dst_src (ReplicateI con)));
format %{ "vaddImmI $dst_src, $dst_src, $con" %}
ins_encode %{
int val = (int)$con$$constant;
if (val > 0) {
__ sve_add($dst_src$$FloatRegister, __ S, val);
} else {
__ sve_sub($dst_src$$FloatRegister, __ S, -val);
}
%}
ins_pipe(pipe_slow);
%}
instruct vaddImmL(vReg dst_src, immLAddSubV con) %{
predicate(UseSVE > 0);
match(Set dst_src (AddVL dst_src (ReplicateL con)));
format %{ "vaddImmL $dst_src, $dst_src, $con" %}
ins_encode %{
int val = (int)$con$$constant;
if (val > 0) {
__ sve_add($dst_src$$FloatRegister, __ D, val);
} else {
__ sve_sub($dst_src$$FloatRegister, __ D, -val);
}
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector sub -----------------------------------
// vector sub
instruct vsubB(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVB src1 src2));
format %{ "vsubB $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ subv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_sub($dst$$FloatRegister, __ B, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsubS(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVS src1 src2));
format %{ "vsubS $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ subv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_sub($dst$$FloatRegister, __ H, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsubI(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVI src1 src2));
format %{ "vsubI $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ subv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_sub($dst$$FloatRegister, __ S, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsubL(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVL src1 src2));
format %{ "vsubL $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ subv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_sub($dst$$FloatRegister, __ D, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsubF(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVF src1 src2));
format %{ "vsubF $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fsub($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fsub($dst$$FloatRegister, __ S, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsubD(vReg dst, vReg src1, vReg src2) %{
match(Set dst (SubVD src1 src2));
format %{ "vsubD $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fsub($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fsub($dst$$FloatRegister, __ D, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector sub - predicated
instruct vsubB_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVB (Binary dst_src1 src2) pg));
format %{ "vsubB_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_sub($dst_src1$$FloatRegister, __ B, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubS_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVS (Binary dst_src1 src2) pg));
format %{ "vsubS_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_sub($dst_src1$$FloatRegister, __ H, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubI_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVI (Binary dst_src1 src2) pg));
format %{ "vsubI_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_sub($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubL_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVL (Binary dst_src1 src2) pg));
format %{ "vsubL_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_sub($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubF_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVF (Binary dst_src1 src2) pg));
format %{ "vsubF_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fsub($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsubD_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (SubVD (Binary dst_src1 src2) pg));
format %{ "vsubD_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fsub($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector mul -----------------------------------
// vector mul - BYTE, CHAR, SHORT, INT
instruct vmulB_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (MulVB src1 src2));
format %{ "vmulB_neon $dst, $src1, $src2" %}
ins_encode %{
__ mulv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulB_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (MulVB dst_src1 src2));
format %{ "vmulB_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
__ sve_mul($dst_src1$$FloatRegister, __ B, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulS_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (MulVS src1 src2));
format %{ "vmulS_neon $dst, $src1, $src2" %}
ins_encode %{
__ mulv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulS_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (MulVS dst_src1 src2));
format %{ "vmulS_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
__ sve_mul($dst_src1$$FloatRegister, __ H, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulI_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (MulVI src1 src2));
format %{ "vmulI_neon $dst, $src1, $src2" %}
ins_encode %{
__ mulv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulI_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (MulVI dst_src1 src2));
format %{ "vmulI_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
__ sve_mul($dst_src1$$FloatRegister, __ S, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector mul - LONG
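// NEON has no 64-bit integer vector multiply, so vmulL_neon below moves each
// lane through the general-purpose registers, multiplies there, and inserts
// the products back into the destination.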
instruct vmulL_neon(vReg dst, vReg src1, vReg src2) %{
predicate(UseSVE == 0);
match(Set dst (MulVL src1 src2));
format %{ "vmulL_neon $dst, $src1, $src2\t# 2L" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == 16, "must be");
__ umov(rscratch1, $src1$$FloatRegister, __ D, 0);
__ umov(rscratch2, $src2$$FloatRegister, __ D, 0);
__ mul(rscratch2, rscratch2, rscratch1);
__ mov($dst$$FloatRegister, __ D, 0, rscratch2);
__ umov(rscratch1, $src1$$FloatRegister, __ D, 1);
__ umov(rscratch2, $src2$$FloatRegister, __ D, 1);
__ mul(rscratch2, rscratch2, rscratch1);
__ mov($dst$$FloatRegister, __ D, 1, rscratch2);
%}
ins_pipe(pipe_slow);
%}
instruct vmulL_sve(vReg dst_src1, vReg src2) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVL dst_src1 src2));
format %{ "vmulL_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
__ sve_mul($dst_src1$$FloatRegister, __ D, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector mul - floating-point
instruct vmulF(vReg dst, vReg src1, vReg src2) %{
match(Set dst (MulVF src1 src2));
format %{ "vmulF $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fmul($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fmul($dst$$FloatRegister, __ S, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vmulD(vReg dst, vReg src1, vReg src2) %{
match(Set dst (MulVD src1 src2));
format %{ "vmulD $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fmul($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fmul($dst$$FloatRegister, __ D, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector mul - predicated
instruct vmulB_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVB (Binary dst_src1 src2) pg));
format %{ "vmulB_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_mul($dst_src1$$FloatRegister, __ B, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulS_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVS (Binary dst_src1 src2) pg));
format %{ "vmulS_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_mul($dst_src1$$FloatRegister, __ H, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulI_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVI (Binary dst_src1 src2) pg));
format %{ "vmulI_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_mul($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulL_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVL (Binary dst_src1 src2) pg));
format %{ "vmulL_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_mul($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulF_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVF (Binary dst_src1 src2) pg));
format %{ "vmulF_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fmul($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vmulD_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (MulVD (Binary dst_src1 src2) pg));
format %{ "vmulD_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fmul($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector float div -----------------------------
// vector float div
instruct vdivF_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (DivVF src1 src2));
format %{ "vdivF_neon $dst, $src1, $src2" %}
ins_encode %{
__ fdiv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivF_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (DivVF dst_src1 src2));
format %{ "vdivF_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
__ sve_fdiv($dst_src1$$FloatRegister, __ S, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (DivVD src1 src2));
format %{ "vdivD_neon $dst, $src1, $src2" %}
ins_encode %{
__ fdiv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (DivVD dst_src1 src2));
format %{ "vdivD_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
__ sve_fdiv($dst_src1$$FloatRegister, __ D, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector float div - predicated
instruct vdivF_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (DivVF (Binary dst_src1 src2) pg));
format %{ "vdivF_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fdiv($dst_src1$$FloatRegister, __ S, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vdivD_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (DivVD (Binary dst_src1 src2) pg));
format %{ "vdivD_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
__ sve_fdiv($dst_src1$$FloatRegister, __ D, $pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector and -----------------------------------
// vector and
instruct vand(vReg dst, vReg src1, vReg src2) %{
match(Set dst (AndV src1 src2));
format %{ "vand $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ andr($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_and($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector and - predicated
instruct vand_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AndV (Binary dst_src1 src2) pg));
format %{ "vand_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_and($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector and reg imm (unpredicated)
instruct vandImmB(vReg dst_src, immBLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (AndV dst_src (ReplicateB con)));
format %{ "vandImmB $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_and($dst_src$$FloatRegister, __ B, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vandImmS(vReg dst_src, immSLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (AndV dst_src (ReplicateS con)));
format %{ "vandImmS $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_and($dst_src$$FloatRegister, __ H, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vandImmI(vReg dst_src, immILog con) %{
predicate(UseSVE > 0);
match(Set dst_src (AndV dst_src (ReplicateI con)));
format %{ "vandImmI $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_and($dst_src$$FloatRegister, __ S, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vandImmL(vReg dst_src, immLLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (AndV dst_src (ReplicateL con)));
format %{ "vandImmL $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_and($dst_src$$FloatRegister, __ D, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector or ------------------------------------
// vector or
instruct vor(vReg dst, vReg src1, vReg src2) %{
match(Set dst (OrV src1 src2));
format %{ "vor $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ orr($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_orr($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector or - predicated
instruct vor_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (OrV (Binary dst_src1 src2) pg));
format %{ "vor_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_orr($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector or reg imm (unpredicated)
instruct vorImmB(vReg dst_src, immBLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (OrV dst_src (ReplicateB con)));
format %{ "vorImmB $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_orr($dst_src$$FloatRegister, __ B, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vorImmS(vReg dst_src, immSLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (OrV dst_src (ReplicateS con)));
format %{ "vorImmS $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_orr($dst_src$$FloatRegister, __ H, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vorImmI(vReg dst_src, immILog con) %{
predicate(UseSVE > 0);
match(Set dst_src (OrV dst_src (ReplicateI con)));
format %{ "vorImmI $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_orr($dst_src$$FloatRegister, __ S, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vorImmL(vReg dst_src, immLLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (OrV dst_src (ReplicateL con)));
format %{ "vorImmL $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_orr($dst_src$$FloatRegister, __ D, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector xor -----------------------------------
// vector xor
instruct vxor(vReg dst, vReg src1, vReg src2) %{
match(Set dst (XorV src1 src2));
format %{ "vxor $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ eor($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_eor($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector xor - predicated
instruct vxor_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (XorV (Binary dst_src1 src2) pg));
format %{ "vxor_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_eor($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector xor reg imm (unpredicated)
instruct vxorImmB(vReg dst_src, immBLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV dst_src (ReplicateB con)));
format %{ "vxorImmB $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_eor($dst_src$$FloatRegister, __ B, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vxorImmS(vReg dst_src, immSLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV dst_src (ReplicateS con)));
format %{ "vxorImmS $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_eor($dst_src$$FloatRegister, __ H, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vxorImmI(vReg dst_src, immILog con) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV dst_src (ReplicateI con)));
format %{ "vxorImmI $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_eor($dst_src$$FloatRegister, __ S, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
instruct vxorImmL(vReg dst_src, immLLog con) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV dst_src (ReplicateL con)));
format %{ "vxorImmL $dst_src, $dst_src, $con" %}
ins_encode %{
__ sve_eor($dst_src$$FloatRegister, __ D, (uint64_t)($con$$constant));
%}
ins_pipe(pipe_slow);
%}
// vector eor3 (unpredicated)
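// eor3 computes dst = src1 ^ src2 ^ src3 in one instruction; it is available
// on NEON with the SHA3 extension and on SVE2, matching the predicates below.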
instruct veor3_neon(vReg dst, vReg src1, vReg src2, vReg src3) %{
predicate(VM_Version::supports_sha3() &&
VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (XorV src1 (XorV src2 src3)));
format %{ "veor3_neon $dst, $src1, $src2, $src3" %}
ins_encode %{
__ eor3($dst$$FloatRegister, __ T16B, $src1$$FloatRegister,
$src2$$FloatRegister, $src3$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct veor3_sve(vReg dst_src1, vReg src2, vReg src3) %{
predicate(UseSVE == 2 && !VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (XorV dst_src1 (XorV src2 src3)));
format %{ "veor3_sve $dst_src1, $dst_src1, $src2, $src3" %}
ins_encode %{
__ sve_eor3($dst_src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector not -----------------------------------
// vector not
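// There is no dedicated NotV ideal node; not(v) reaches the matcher as
// v ^ replicate(-1), which the rules below recognize.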
instruct vnotI(vReg dst, vReg src, immI_M1 m1) %{
match(Set dst (XorV src (ReplicateB m1)));
match(Set dst (XorV src (ReplicateS m1)));
match(Set dst (XorV src (ReplicateI m1)));
format %{ "vnotI $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ notr($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_not($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vnotL(vReg dst, vReg src, immL_M1 m1) %{
match(Set dst (XorV src (ReplicateL m1)));
format %{ "vnotL $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ notr($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_not($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector not - predicated
instruct vnotI_masked(vReg dst_src, immI_M1 m1, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV (Binary dst_src (ReplicateB m1)) pg));
match(Set dst_src (XorV (Binary dst_src (ReplicateS m1)) pg));
match(Set dst_src (XorV (Binary dst_src (ReplicateI m1)) pg));
format %{ "vnotI_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_not($dst_src$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vnotL_masked(vReg dst_src, immL_M1 m1, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (XorV (Binary dst_src (ReplicateL m1)) pg));
format %{ "vnotL_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_not($dst_src$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector and_not -------------------------------
// vector and_not
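// and_not(a, b) == a & ~b reaches the matcher as (AndV a (XorV b (Replicate -1)))
// and is emitted as a single bic.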
instruct vand_notI(vReg dst, vReg src1, vReg src2, immI_M1 m1) %{
match(Set dst (AndV src1 (XorV src2 (ReplicateB m1))));
match(Set dst (AndV src1 (XorV src2 (ReplicateS m1))));
match(Set dst (AndV src1 (XorV src2 (ReplicateI m1))));
format %{ "vand_notI $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ bic($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_bic($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vand_notL(vReg dst, vReg src1, vReg src2, immL_M1 m1) %{
match(Set dst (AndV src1 (XorV src2 (ReplicateL m1))));
format %{ "vand_notL $dst, $src1, $src2" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ bic($dst$$FloatRegister, length_in_bytes == 16 ? __ T16B : __ T8B,
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_bic($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector and_not - predicated
instruct vand_notI_masked(vReg dst_src1, vReg src2, immI_M1 m1, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AndV (Binary dst_src1 (XorV src2 (ReplicateB m1))) pg));
match(Set dst_src1 (AndV (Binary dst_src1 (XorV src2 (ReplicateS m1))) pg));
match(Set dst_src1 (AndV (Binary dst_src1 (XorV src2 (ReplicateI m1))) pg));
format %{ "vand_notI_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_bic($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vand_notL_masked(vReg dst_src1, vReg src2, immL_M1 m1, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AndV (Binary dst_src1 (XorV src2 (ReplicateL m1))) pg));
format %{ "vand_notL_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_bic($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector abs -----------------------------------
// vector abs
instruct vabsB(vReg dst, vReg src) %{
match(Set dst (AbsVB src));
format %{ "vabsB $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ absr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_abs($dst$$FloatRegister, __ B, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vabsS(vReg dst, vReg src) %{
match(Set dst (AbsVS src));
format %{ "vabsS $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ absr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_abs($dst$$FloatRegister, __ H, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vabsI(vReg dst, vReg src) %{
match(Set dst (AbsVI src));
format %{ "vabsI $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ absr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_abs($dst$$FloatRegister, __ S, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vabsL(vReg dst, vReg src) %{
match(Set dst (AbsVL src));
format %{ "vabsL $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ absr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_abs($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vabsF(vReg dst, vReg src) %{
match(Set dst (AbsVF src));
format %{ "vabsF $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fabs($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fabs($dst$$FloatRegister, __ S, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vabsD(vReg dst, vReg src) %{
match(Set dst (AbsVD src));
format %{ "vabsD $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fabs($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fabs($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector abs - predicated
instruct vabsB_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVB dst_src pg));
format %{ "vabsB_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_abs($dst_src$$FloatRegister, __ B, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vabsS_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVS dst_src pg));
format %{ "vabsS_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_abs($dst_src$$FloatRegister, __ H, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vabsI_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVI dst_src pg));
format %{ "vabsI_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_abs($dst_src$$FloatRegister, __ S, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vabsL_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVL dst_src pg));
format %{ "vabsL_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_abs($dst_src$$FloatRegister, __ D, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vabsF_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVF dst_src pg));
format %{ "vabsF_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fabs($dst_src$$FloatRegister, __ S, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vabsD_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (AbsVD dst_src pg));
format %{ "vabsD_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fabs($dst_src$$FloatRegister, __ D, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector fabd ----------------------------------
// vector fabs diff
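// fabd computes |src1 - src2| directly, folding the matched SubV + AbsV pair
// into one instruction.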
instruct vfabd_neon(vReg dst, vReg src1, vReg src2) %{
predicate(VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (AbsVF (SubVF src1 src2)));
match(Set dst (AbsVD (SubVD src1 src2)));
format %{ "vfabd_neon $dst, $src1, $src2" %}
ins_encode %{
__ fabd($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vfabd_sve(vReg dst_src1, vReg src2) %{
predicate(!VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst_src1 (AbsVF (SubVF dst_src1 src2)));
match(Set dst_src1 (AbsVD (SubVD dst_src1 src2)));
format %{ "vfabd_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_fabd($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector fabs diff - predicated
instruct vfabd_masked(vReg dst_src1, vReg src2, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src1 (AbsVF (SubVF (Binary dst_src1 src2) pg) pg));
match(Set dst_src1 (AbsVD (SubVD (Binary dst_src1 src2) pg) pg));
format %{ "vfabd_masked $dst_src1, $pg, $dst_src1, $src2" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_fabd($dst_src1$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector neg -----------------------------------
// vector neg
instruct vnegI(vReg dst, vReg src) %{
match(Set dst (NegVI src));
format %{ "vnegI $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ negr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_neg($dst$$FloatRegister, __ elemType_to_regVariant(bt),
ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vnegL(vReg dst, vReg src) %{
match(Set dst (NegVL src));
format %{ "vnegL $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ negr($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_neg($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vnegF(vReg dst, vReg src) %{
match(Set dst (NegVF src));
format %{ "vnegF $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fneg($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fneg($dst$$FloatRegister, __ S, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vnegD(vReg dst, vReg src) %{
match(Set dst (NegVD src));
format %{ "vnegD $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fneg($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fneg($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector neg - predicated
instruct vnegI_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (NegVI dst_src pg));
format %{ "vnegI_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
__ sve_neg($dst_src$$FloatRegister, __ elemType_to_regVariant(bt),
$pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vnegL_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (NegVL dst_src pg));
format %{ "vnegL_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_neg($dst_src$$FloatRegister, __ D, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vnegF_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (NegVF dst_src pg));
format %{ "vnegF_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fneg($dst_src$$FloatRegister, __ S, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vnegD_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (NegVD dst_src pg));
format %{ "vnegD_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fneg($dst_src$$FloatRegister, __ D, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector sqrt ----------------------------------
// vector sqrt
instruct vsqrtF(vReg dst, vReg src) %{
match(Set dst (SqrtVF src));
format %{ "vsqrtF $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fsqrt($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fsqrt($dst$$FloatRegister, __ S, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
instruct vsqrtD(vReg dst, vReg src) %{
match(Set dst (SqrtVD src));
format %{ "vsqrtD $dst, $src" %}
ins_encode %{
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
__ fsqrt($dst$$FloatRegister, get_arrangement(this), $src$$FloatRegister);
} else {
assert(UseSVE > 0, "must be sve");
__ sve_fsqrt($dst$$FloatRegister, __ D, ptrue, $src$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}
// vector sqrt - predicated
instruct vsqrtF_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (SqrtVF dst_src pg));
format %{ "vsqrtF_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fsqrt($dst_src$$FloatRegister, __ S, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vsqrtD_masked(vReg dst_src, pRegGov pg) %{
predicate(UseSVE > 0);
match(Set dst_src (SqrtVD dst_src pg));
format %{ "vsqrtD_masked $dst_src, $pg, $dst_src" %}
ins_encode %{
__ sve_fsqrt($dst_src$$FloatRegister, __ D, $pg$$PRegister, $dst_src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ------------------------------ Vector min -----------------------------------
// vector min - LONG
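// NEON has no 64-bit integer smin, so vminL_neon below compares the lanes with
// cmgt and selects the smaller ones with bsl.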
instruct vminL_neon(vReg dst, vReg src1, vReg src2) %{
predicate(UseSVE == 0 && Matcher::vector_element_basic_type(n) == T_LONG);
match(Set dst (MinV src1 src2));
effect(TEMP_DEF dst);
format %{ "vminL_neon $dst, $src1, $src2\t# 2L" %}
ins_encode %{
__ cmgt($dst$$FloatRegister, __ T2D, $src1$$FloatRegister, $src2$$FloatRegister);
__ bsl($dst$$FloatRegister, __ T16B, $src2$$FloatRegister, $src1$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct vminL_sve(vReg dst_src1, vReg src2) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n) == T_LONG);
match(Set dst_src1 (MinV dst_src1 src2));
format %{ "vminL_sve $dst_src1, $dst_src1, $src2" %}
ins_encode %{
__ sve_smin($dst_src1$$FloatRegister, __ D, ptrue, $src2$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// vector min - B/S/I/F/D
instruct vmin_neon(vReg dst, vReg src1, vReg src2) %{
predicate(Matcher::vector_element_basic_type(n) != T_LONG &&
VM_Version::use_neon_for_vector(Matcher::vector_length_in_bytes(n)));
match(Set dst (MinV src1 src2));
format %{ "vmin_neon $dst, $src1, $src2\t# B/S/I/F/D" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
if (is_floating_point_type(bt)) {
__ fmin($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
} else {
assert(is_integral_type(bt) && bt != T_LONG, "unsupported type");
__ minv($dst$$FloatRegister, get_arrangement(this),
$src1$$FloatRegister, $src2$$FloatRegister);
}
%}
ins_pipe(pipe_slow);
%}