//
// Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
// Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// RISCV Architecture Description File
//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name ( register save type, C convention save type,
// ideal register type, encoding );
// Register Save Types:
//
// NS = No-Save: The register allocator assumes that these registers
// can be used without saving upon entry to the method, &
// that they do not need to be saved at call sites.
//
// SOC = Save-On-Call: The register allocator assumes that these registers
// can be used without saving upon entry to the method,
// but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, but they do not need to be saved at call
// sites.
//
// AS = Always-Save: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. The upper
// halves are used by the register allocator but are not actually
// supplied as operands to memory ops.
//
// We follow the C1 compiler in making registers
//
// x7, x9-x17, x27-x31 volatile (caller save)
// x0-x4, x8, x23 system (no save, no allocate)
// x5-x6 non-allocatable (so we can use them as temporary regs)
//
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
//
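// As a worked example of the format above: "reg_def R10 (SOC, SOC, Op_RegI, 10,
// x10->as_VMReg())" declares x10 (a0, the first argument register in the standard
// RISC-V calling convention) as save-on-call for both the Java and C conventions,
// spilled and restored as an integer (Op_RegI), with encoding 10 placed into the
// generated opcodes. (Illustrative reading only; the authoritative definitions follow.)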
// General Registers
reg_def R0 ( NS, NS, Op_RegI, 0, x0->as_VMReg() ); // zr
reg_def R0_H ( NS, NS, Op_RegI, 0, x0->as_VMReg()->next() );
reg_def R1 ( NS, SOC, Op_RegI, 1, x1->as_VMReg() ); // ra
reg_def R1_H ( NS, SOC, Op_RegI, 1, x1->as_VMReg()->next() );
reg_def R2 ( NS, SOE, Op_RegI, 2, x2->as_VMReg() ); // sp
reg_def R2_H ( NS, SOE, Op_RegI, 2, x2->as_VMReg()->next() );
reg_def R3 ( NS, NS, Op_RegI, 3, x3->as_VMReg() ); // gp
reg_def R3_H ( NS, NS, Op_RegI, 3, x3->as_VMReg()->next() );
reg_def R4 ( NS, NS, Op_RegI, 4, x4->as_VMReg() ); // tp
reg_def R4_H ( NS, NS, Op_RegI, 4, x4->as_VMReg()->next() );
reg_def R7 ( SOC, SOC, Op_RegI, 7, x7->as_VMReg() );
reg_def R7_H ( SOC, SOC, Op_RegI, 7, x7->as_VMReg()->next() );
reg_def R8 ( NS, SOE, Op_RegI, 8, x8->as_VMReg() ); // fp
reg_def R8_H ( NS, SOE, Op_RegI, 8, x8->as_VMReg()->next() );
reg_def R9 ( SOC, SOE, Op_RegI, 9, x9->as_VMReg() );
reg_def R9_H ( SOC, SOE, Op_RegI, 9, x9->as_VMReg()->next() );
reg_def R10 ( SOC, SOC, Op_RegI, 10, x10->as_VMReg() );
reg_def R10_H ( SOC, SOC, Op_RegI, 10, x10->as_VMReg()->next());
reg_def R11 ( SOC, SOC, Op_RegI, 11, x11->as_VMReg() );
reg_def R11_H ( SOC, SOC, Op_RegI, 11, x11->as_VMReg()->next());
reg_def R12 ( SOC, SOC, Op_RegI, 12, x12->as_VMReg() );
reg_def R12_H ( SOC, SOC, Op_RegI, 12, x12->as_VMReg()->next());
reg_def R13 ( SOC, SOC, Op_RegI, 13, x13->as_VMReg() );
reg_def R13_H ( SOC, SOC, Op_RegI, 13, x13->as_VMReg()->next());
reg_def R14 ( SOC, SOC, Op_RegI, 14, x14->as_VMReg() );
reg_def R14_H ( SOC, SOC, Op_RegI, 14, x14->as_VMReg()->next());
reg_def R15 ( SOC, SOC, Op_RegI, 15, x15->as_VMReg() );
reg_def R15_H ( SOC, SOC, Op_RegI, 15, x15->as_VMReg()->next());
reg_def R16 ( SOC, SOC, Op_RegI, 16, x16->as_VMReg() );
reg_def R16_H ( SOC, SOC, Op_RegI, 16, x16->as_VMReg()->next());
reg_def R17 ( SOC, SOC, Op_RegI, 17, x17->as_VMReg() );
reg_def R17_H ( SOC, SOC, Op_RegI, 17, x17->as_VMReg()->next());
reg_def R18 ( SOC, SOE, Op_RegI, 18, x18->as_VMReg() );
reg_def R18_H ( SOC, SOE, Op_RegI, 18, x18->as_VMReg()->next());
reg_def R19 ( SOC, SOE, Op_RegI, 19, x19->as_VMReg() );
reg_def R19_H ( SOC, SOE, Op_RegI, 19, x19->as_VMReg()->next());
reg_def R20 ( SOC, SOE, Op_RegI, 20, x20->as_VMReg() ); // caller esp
reg_def R20_H ( SOC, SOE, Op_RegI, 20, x20->as_VMReg()->next());
reg_def R21 ( SOC, SOE, Op_RegI, 21, x21->as_VMReg() );
reg_def R21_H ( SOC, SOE, Op_RegI, 21, x21->as_VMReg()->next());
reg_def R22 ( SOC, SOE, Op_RegI, 22, x22->as_VMReg() );
reg_def R22_H ( SOC, SOE, Op_RegI, 22, x22->as_VMReg()->next());
reg_def R23 ( NS, SOE, Op_RegI, 23, x23->as_VMReg() ); // java thread
reg_def R23_H ( NS, SOE, Op_RegI, 23, x23->as_VMReg()->next());
reg_def R24 ( SOC, SOE, Op_RegI, 24, x24->as_VMReg() );
reg_def R24_H ( SOC, SOE, Op_RegI, 24, x24->as_VMReg()->next());
reg_def R25 ( SOC, SOE, Op_RegI, 25, x25->as_VMReg() );
reg_def R25_H ( SOC, SOE, Op_RegI, 25, x25->as_VMReg()->next());
reg_def R26 ( SOC, SOE, Op_RegI, 26, x26->as_VMReg() );
reg_def R26_H ( SOC, SOE, Op_RegI, 26, x26->as_VMReg()->next());
reg_def R27 ( SOC, SOE, Op_RegI, 27, x27->as_VMReg() ); // heapbase
reg_def R27_H ( SOC, SOE, Op_RegI, 27, x27->as_VMReg()->next());
reg_def R28 ( SOC, SOC, Op_RegI, 28, x28->as_VMReg() );
reg_def R28_H ( SOC, SOC, Op_RegI, 28, x28->as_VMReg()->next());
reg_def R29 ( SOC, SOC, Op_RegI, 29, x29->as_VMReg() );
reg_def R29_H ( SOC, SOC, Op_RegI, 29, x29->as_VMReg()->next());
reg_def R30 ( SOC, SOC, Op_RegI, 30, x30->as_VMReg() );
reg_def R30_H ( SOC, SOC, Op_RegI, 30, x30->as_VMReg()->next());
reg_def R31 ( SOC, SOC, Op_RegI, 31, x31->as_VMReg() );
reg_def R31_H ( SOC, SOC, Op_RegI, 31, x31->as_VMReg()->next());
// ----------------------------
// Float/Double Registers
// ----------------------------
// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.
// RISCV has 32 floating-point registers. Each can store a single
// or double precision floating-point value.
// For Java use, float registers f0-f31 are always save-on-call, whereas
// the platform ABI treats f8-f9 and f18-f27 as callee-save. Other
// float registers are SOC as per the platform spec.
reg_def F0 ( SOC, SOC, Op_RegF, 0, f0->as_VMReg() );
reg_def F0_H ( SOC, SOC, Op_RegF, 0, f0->as_VMReg()->next() );
reg_def F1 ( SOC, SOC, Op_RegF, 1, f1->as_VMReg() );
reg_def F1_H ( SOC, SOC, Op_RegF, 1, f1->as_VMReg()->next() );
reg_def F2 ( SOC, SOC, Op_RegF, 2, f2->as_VMReg() );
reg_def F2_H ( SOC, SOC, Op_RegF, 2, f2->as_VMReg()->next() );
reg_def F3 ( SOC, SOC, Op_RegF, 3, f3->as_VMReg() );
reg_def F3_H ( SOC, SOC, Op_RegF, 3, f3->as_VMReg()->next() );
reg_def F4 ( SOC, SOC, Op_RegF, 4, f4->as_VMReg() );
reg_def F4_H ( SOC, SOC, Op_RegF, 4, f4->as_VMReg()->next() );
reg_def F5 ( SOC, SOC, Op_RegF, 5, f5->as_VMReg() );
reg_def F5_H ( SOC, SOC, Op_RegF, 5, f5->as_VMReg()->next() );
reg_def F6 ( SOC, SOC, Op_RegF, 6, f6->as_VMReg() );
reg_def F6_H ( SOC, SOC, Op_RegF, 6, f6->as_VMReg()->next() );
reg_def F7 ( SOC, SOC, Op_RegF, 7, f7->as_VMReg() );
reg_def F7_H ( SOC, SOC, Op_RegF, 7, f7->as_VMReg()->next() );
reg_def F8 ( SOC, SOE, Op_RegF, 8, f8->as_VMReg() );
reg_def F8_H ( SOC, SOE, Op_RegF, 8, f8->as_VMReg()->next() );
reg_def F9 ( SOC, SOE, Op_RegF, 9, f9->as_VMReg() );
reg_def F9_H ( SOC, SOE, Op_RegF, 9, f9->as_VMReg()->next() );
reg_def F10 ( SOC, SOC, Op_RegF, 10, f10->as_VMReg() );
reg_def F10_H ( SOC, SOC, Op_RegF, 10, f10->as_VMReg()->next() );
reg_def F11 ( SOC, SOC, Op_RegF, 11, f11->as_VMReg() );
reg_def F11_H ( SOC, SOC, Op_RegF, 11, f11->as_VMReg()->next() );
reg_def F12 ( SOC, SOC, Op_RegF, 12, f12->as_VMReg() );
reg_def F12_H ( SOC, SOC, Op_RegF, 12, f12->as_VMReg()->next() );
reg_def F13 ( SOC, SOC, Op_RegF, 13, f13->as_VMReg() );
reg_def F13_H ( SOC, SOC, Op_RegF, 13, f13->as_VMReg()->next() );
reg_def F14 ( SOC, SOC, Op_RegF, 14, f14->as_VMReg() );
reg_def F14_H ( SOC, SOC, Op_RegF, 14, f14->as_VMReg()->next() );
reg_def F15 ( SOC, SOC, Op_RegF, 15, f15->as_VMReg() );
reg_def F15_H ( SOC, SOC, Op_RegF, 15, f15->as_VMReg()->next() );
reg_def F16 ( SOC, SOC, Op_RegF, 16, f16->as_VMReg() );
reg_def F16_H ( SOC, SOC, Op_RegF, 16, f16->as_VMReg()->next() );
reg_def F17 ( SOC, SOC, Op_RegF, 17, f17->as_VMReg() );
reg_def F17_H ( SOC, SOC, Op_RegF, 17, f17->as_VMReg()->next() );
reg_def F18 ( SOC, SOE, Op_RegF, 18, f18->as_VMReg() );
reg_def F18_H ( SOC, SOE, Op_RegF, 18, f18->as_VMReg()->next() );
reg_def F19 ( SOC, SOE, Op_RegF, 19, f19->as_VMReg() );
reg_def F19_H ( SOC, SOE, Op_RegF, 19, f19->as_VMReg()->next() );
reg_def F20 ( SOC, SOE, Op_RegF, 20, f20->as_VMReg() );
reg_def F20_H ( SOC, SOE, Op_RegF, 20, f20->as_VMReg()->next() );
reg_def F21 ( SOC, SOE, Op_RegF, 21, f21->as_VMReg() );
reg_def F21_H ( SOC, SOE, Op_RegF, 21, f21->as_VMReg()->next() );
reg_def F22 ( SOC, SOE, Op_RegF, 22, f22->as_VMReg() );
reg_def F22_H ( SOC, SOE, Op_RegF, 22, f22->as_VMReg()->next() );
reg_def F23 ( SOC, SOE, Op_RegF, 23, f23->as_VMReg() );
reg_def F23_H ( SOC, SOE, Op_RegF, 23, f23->as_VMReg()->next() );
reg_def F24 ( SOC, SOE, Op_RegF, 24, f24->as_VMReg() );
reg_def F24_H ( SOC, SOE, Op_RegF, 24, f24->as_VMReg()->next() );
reg_def F25 ( SOC, SOE, Op_RegF, 25, f25->as_VMReg() );
reg_def F25_H ( SOC, SOE, Op_RegF, 25, f25->as_VMReg()->next() );
reg_def F26 ( SOC, SOE, Op_RegF, 26, f26->as_VMReg() );
reg_def F26_H ( SOC, SOE, Op_RegF, 26, f26->as_VMReg()->next() );
reg_def F27 ( SOC, SOE, Op_RegF, 27, f27->as_VMReg() );
reg_def F27_H ( SOC, SOE, Op_RegF, 27, f27->as_VMReg()->next() );
reg_def F28 ( SOC, SOC, Op_RegF, 28, f28->as_VMReg() );
reg_def F28_H ( SOC, SOC, Op_RegF, 28, f28->as_VMReg()->next() );
reg_def F29 ( SOC, SOC, Op_RegF, 29, f29->as_VMReg() );
reg_def F29_H ( SOC, SOC, Op_RegF, 29, f29->as_VMReg()->next() );
reg_def F30 ( SOC, SOC, Op_RegF, 30, f30->as_VMReg() );
reg_def F30_H ( SOC, SOC, Op_RegF, 30, f30->as_VMReg()->next() );
reg_def F31 ( SOC, SOC, Op_RegF, 31, f31->as_VMReg() );
reg_def F31_H ( SOC, SOC, Op_RegF, 31, f31->as_VMReg()->next() );
// ----------------------------
// Vector Registers
// ----------------------------
// For RVV vector registers, we simply extend vector register size to 4
// 'logical' slots. This is nominally 128 bits but it actually covers
// all possible 'physical' RVV vector register lengths from 128 ~ 1024
// bits. The 'physical' RVV vector register length is detected during
// startup, so the register allocator is able to identify the correct
// number of bytes needed for an RVV spill/unspill.
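// (The concrete byte count used for a vector spill is queried at code-emission time
// via Matcher::scalable_vector_reg_size(T_BYTE); see MachSpillCopyNode::implementation
// further below.)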
reg_def V0 ( SOC, SOC, Op_VecA, 0, v0->as_VMReg() );
reg_def V0_H ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next() );
reg_def V0_J ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(2) );
reg_def V0_K ( SOC, SOC, Op_VecA, 0, v0->as_VMReg()->next(3) );
reg_def V1 ( SOC, SOC, Op_VecA, 1, v1->as_VMReg() );
reg_def V1_H ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next() );
reg_def V1_J ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(2) );
reg_def V1_K ( SOC, SOC, Op_VecA, 1, v1->as_VMReg()->next(3) );
reg_def V2 ( SOC, SOC, Op_VecA, 2, v2->as_VMReg() );
reg_def V2_H ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next() );
reg_def V2_J ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(2) );
reg_def V2_K ( SOC, SOC, Op_VecA, 2, v2->as_VMReg()->next(3) );
reg_def V3 ( SOC, SOC, Op_VecA, 3, v3->as_VMReg() );
reg_def V3_H ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next() );
reg_def V3_J ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(2) );
reg_def V3_K ( SOC, SOC, Op_VecA, 3, v3->as_VMReg()->next(3) );
reg_def V4 ( SOC, SOC, Op_VecA, 4, v4->as_VMReg() );
reg_def V4_H ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next() );
reg_def V4_J ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(2) );
reg_def V4_K ( SOC, SOC, Op_VecA, 4, v4->as_VMReg()->next(3) );
reg_def V5 ( SOC, SOC, Op_VecA, 5, v5->as_VMReg() );
reg_def V5_H ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next() );
reg_def V5_J ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(2) );
reg_def V5_K ( SOC, SOC, Op_VecA, 5, v5->as_VMReg()->next(3) );
reg_def V6 ( SOC, SOC, Op_VecA, 6, v6->as_VMReg() );
reg_def V6_H ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next() );
reg_def V6_J ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(2) );
reg_def V6_K ( SOC, SOC, Op_VecA, 6, v6->as_VMReg()->next(3) );
reg_def V7 ( SOC, SOC, Op_VecA, 7, v7->as_VMReg() );
reg_def V7_H ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next() );
reg_def V7_J ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(2) );
reg_def V7_K ( SOC, SOC, Op_VecA, 7, v7->as_VMReg()->next(3) );
reg_def V8 ( SOC, SOC, Op_VecA, 8, v8->as_VMReg() );
reg_def V8_H ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next() );
reg_def V8_J ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(2) );
reg_def V8_K ( SOC, SOC, Op_VecA, 8, v8->as_VMReg()->next(3) );
reg_def V9 ( SOC, SOC, Op_VecA, 9, v9->as_VMReg() );
reg_def V9_H ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next() );
reg_def V9_J ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(2) );
reg_def V9_K ( SOC, SOC, Op_VecA, 9, v9->as_VMReg()->next(3) );
reg_def V10 ( SOC, SOC, Op_VecA, 10, v10->as_VMReg() );
reg_def V10_H ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next() );
reg_def V10_J ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(2) );
reg_def V10_K ( SOC, SOC, Op_VecA, 10, v10->as_VMReg()->next(3) );
reg_def V11 ( SOC, SOC, Op_VecA, 11, v11->as_VMReg() );
reg_def V11_H ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next() );
reg_def V11_J ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(2) );
reg_def V11_K ( SOC, SOC, Op_VecA, 11, v11->as_VMReg()->next(3) );
reg_def V12 ( SOC, SOC, Op_VecA, 12, v12->as_VMReg() );
reg_def V12_H ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next() );
reg_def V12_J ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(2) );
reg_def V12_K ( SOC, SOC, Op_VecA, 12, v12->as_VMReg()->next(3) );
reg_def V13 ( SOC, SOC, Op_VecA, 13, v13->as_VMReg() );
reg_def V13_H ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next() );
reg_def V13_J ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(2) );
reg_def V13_K ( SOC, SOC, Op_VecA, 13, v13->as_VMReg()->next(3) );
reg_def V14 ( SOC, SOC, Op_VecA, 14, v14->as_VMReg() );
reg_def V14_H ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next() );
reg_def V14_J ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(2) );
reg_def V14_K ( SOC, SOC, Op_VecA, 14, v14->as_VMReg()->next(3) );
reg_def V15 ( SOC, SOC, Op_VecA, 15, v15->as_VMReg() );
reg_def V15_H ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next() );
reg_def V15_J ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(2) );
reg_def V15_K ( SOC, SOC, Op_VecA, 15, v15->as_VMReg()->next(3) );
reg_def V16 ( SOC, SOC, Op_VecA, 16, v16->as_VMReg() );
reg_def V16_H ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next() );
reg_def V16_J ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(2) );
reg_def V16_K ( SOC, SOC, Op_VecA, 16, v16->as_VMReg()->next(3) );
reg_def V17 ( SOC, SOC, Op_VecA, 17, v17->as_VMReg() );
reg_def V17_H ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next() );
reg_def V17_J ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(2) );
reg_def V17_K ( SOC, SOC, Op_VecA, 17, v17->as_VMReg()->next(3) );
reg_def V18 ( SOC, SOC, Op_VecA, 18, v18->as_VMReg() );
reg_def V18_H ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next() );
reg_def V18_J ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(2) );
reg_def V18_K ( SOC, SOC, Op_VecA, 18, v18->as_VMReg()->next(3) );
reg_def V19 ( SOC, SOC, Op_VecA, 19, v19->as_VMReg() );
reg_def V19_H ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next() );
reg_def V19_J ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(2) );
reg_def V19_K ( SOC, SOC, Op_VecA, 19, v19->as_VMReg()->next(3) );
reg_def V20 ( SOC, SOC, Op_VecA, 20, v20->as_VMReg() );
reg_def V20_H ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next() );
reg_def V20_J ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(2) );
reg_def V20_K ( SOC, SOC, Op_VecA, 20, v20->as_VMReg()->next(3) );
reg_def V21 ( SOC, SOC, Op_VecA, 21, v21->as_VMReg() );
reg_def V21_H ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next() );
reg_def V21_J ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(2) );
reg_def V21_K ( SOC, SOC, Op_VecA, 21, v21->as_VMReg()->next(3) );
reg_def V22 ( SOC, SOC, Op_VecA, 22, v22->as_VMReg() );
reg_def V22_H ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next() );
reg_def V22_J ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(2) );
reg_def V22_K ( SOC, SOC, Op_VecA, 22, v22->as_VMReg()->next(3) );
reg_def V23 ( SOC, SOC, Op_VecA, 23, v23->as_VMReg() );
reg_def V23_H ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next() );
reg_def V23_J ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(2) );
reg_def V23_K ( SOC, SOC, Op_VecA, 23, v23->as_VMReg()->next(3) );
reg_def V24 ( SOC, SOC, Op_VecA, 24, v24->as_VMReg() );
reg_def V24_H ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next() );
reg_def V24_J ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(2) );
reg_def V24_K ( SOC, SOC, Op_VecA, 24, v24->as_VMReg()->next(3) );
reg_def V25 ( SOC, SOC, Op_VecA, 25, v25->as_VMReg() );
reg_def V25_H ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next() );
reg_def V25_J ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(2) );
reg_def V25_K ( SOC, SOC, Op_VecA, 25, v25->as_VMReg()->next(3) );
reg_def V26 ( SOC, SOC, Op_VecA, 26, v26->as_VMReg() );
reg_def V26_H ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next() );
reg_def V26_J ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(2) );
reg_def V26_K ( SOC, SOC, Op_VecA, 26, v26->as_VMReg()->next(3) );
reg_def V27 ( SOC, SOC, Op_VecA, 27, v27->as_VMReg() );
reg_def V27_H ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next() );
reg_def V27_J ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(2) );
reg_def V27_K ( SOC, SOC, Op_VecA, 27, v27->as_VMReg()->next(3) );
reg_def V28 ( SOC, SOC, Op_VecA, 28, v28->as_VMReg() );
reg_def V28_H ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next() );
reg_def V28_J ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(2) );
reg_def V28_K ( SOC, SOC, Op_VecA, 28, v28->as_VMReg()->next(3) );
reg_def V29 ( SOC, SOC, Op_VecA, 29, v29->as_VMReg() );
reg_def V29_H ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next() );
reg_def V29_J ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(2) );
reg_def V29_K ( SOC, SOC, Op_VecA, 29, v29->as_VMReg()->next(3) );
reg_def V30 ( SOC, SOC, Op_VecA, 30, v30->as_VMReg() );
reg_def V30_H ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next() );
reg_def V30_J ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(2) );
reg_def V30_K ( SOC, SOC, Op_VecA, 30, v30->as_VMReg()->next(3) );
reg_def V31 ( SOC, SOC, Op_VecA, 31, v31->as_VMReg() );
reg_def V31_H ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next() );
reg_def V31_J ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(2) );
reg_def V31_K ( SOC, SOC, Op_VecA, 31, v31->as_VMReg()->next(3) );
// ----------------------------
// Special Registers
// ----------------------------
// On riscv, the physical flag register is missing, so we use t1 instead,
// to bridge the RegFlag semantics in share/opto
reg_def RFLAGS (SOC, SOC, Op_RegFlags, 6, x6->as_VMReg() );
// Specify priority of register selection within phases of register
// allocation. Highest priority is first. A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry. Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.
alloc_class chunk0(
// volatiles
R7, R7_H,
R28, R28_H,
R29, R29_H,
R30, R30_H,
R31, R31_H,
// arg registers
R10, R10_H,
R11, R11_H,
R12, R12_H,
R13, R13_H,
R14, R14_H,
R15, R15_H,
R16, R16_H,
R17, R17_H,
// non-volatiles
R9, R9_H,
R18, R18_H,
R19, R19_H,
R20, R20_H,
R21, R21_H,
R22, R22_H,
R24, R24_H,
R25, R25_H,
R26, R26_H,
// non-allocatable registers
R23, R23_H, // java thread
R27, R27_H, // heapbase
R4, R4_H, // thread
R8, R8_H, // fp
R0, R0_H, // zero
R1, R1_H, // ra
R2, R2_H, // sp
R3, R3_H, // gp
);
alloc_class chunk1(
// no save
F0, F0_H,
F1, F1_H,
F2, F2_H,
F3, F3_H,
F4, F4_H,
F5, F5_H,
F6, F6_H,
F7, F7_H,
F28, F28_H,
F29, F29_H,
F30, F30_H,
F31, F31_H,
// arg registers
F10, F10_H,
F11, F11_H,
F12, F12_H,
F13, F13_H,
F14, F14_H,
F15, F15_H,
F16, F16_H,
F17, F17_H,
// non-volatiles
F8, F8_H,
F9, F9_H,
F18, F18_H,
F19, F19_H,
F20, F20_H,
F21, F21_H,
F22, F22_H,
F23, F23_H,
F24, F24_H,
F25, F25_H,
F26, F26_H,
F27, F27_H,
);
alloc_class chunk2(
V0, V0_H, V0_J, V0_K,
V1, V1_H, V1_J, V1_K,
V2, V2_H, V2_J, V2_K,
V3, V3_H, V3_J, V3_K,
V4, V4_H, V4_J, V4_K,
V5, V5_H, V5_J, V5_K,
V6, V6_H, V6_J, V6_K,
V7, V7_H, V7_J, V7_K,
V8, V8_H, V8_J, V8_K,
V9, V9_H, V9_J, V9_K,
V10, V10_H, V10_J, V10_K,
V11, V11_H, V11_J, V11_K,
V12, V12_H, V12_J, V12_K,
V13, V13_H, V13_J, V13_K,
V14, V14_H, V14_J, V14_K,
V15, V15_H, V15_J, V15_K,
V16, V16_H, V16_J, V16_K,
V17, V17_H, V17_J, V17_K,
V18, V18_H, V18_J, V18_K,
V19, V19_H, V19_J, V19_K,
V20, V20_H, V20_J, V20_K,
V21, V21_H, V21_J, V21_K,
V22, V22_H, V22_J, V22_K,
V23, V23_H, V23_J, V23_K,
V24, V24_H, V24_J, V24_K,
V25, V25_H, V25_J, V25_K,
V26, V26_H, V26_J, V26_K,
V27, V27_H, V27_J, V27_K,
V28, V28_H, V28_J, V28_K,
V29, V29_H, V29_J, V29_K,
V30, V30_H, V30_J, V30_K,
V31, V31_H, V31_J, V31_K,
);
alloc_class chunk3(RFLAGS);
//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
// Class for all 32 bit general purpose registers
reg_class all_reg32(
R0,
R1,
R2,
R3,
R4,
R7,
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15,
R16,
R17,
R18,
R19,
R20,
R21,
R22,
R23,
R24,
R25,
R26,
R27,
R28,
R29,
R30,
R31
);
// Class for any 32 bit integer registers (excluding zr)
reg_class any_reg32 %{
return _ANY_REG32_mask;
%}
// Singleton class for R10 int register
reg_class int_r10_reg(R10);
// Singleton class for R12 int register
reg_class int_r12_reg(R12);
// Singleton class for R13 int register
reg_class int_r13_reg(R13);
// Singleton class for R14 int register
reg_class int_r14_reg(R14);
// Class for all long integer registers
reg_class all_reg(
R0, R0_H,
R1, R1_H,
R2, R2_H,
R3, R3_H,
R4, R4_H,
R7, R7_H,
R8, R8_H,
R9, R9_H,
R10, R10_H,
R11, R11_H,
R12, R12_H,
R13, R13_H,
R14, R14_H,
R15, R15_H,
R16, R16_H,
R17, R17_H,
R18, R18_H,
R19, R19_H,
R20, R20_H,
R21, R21_H,
R22, R22_H,
R23, R23_H,
R24, R24_H,
R25, R25_H,
R26, R26_H,
R27, R27_H,
R28, R28_H,
R29, R29_H,
R30, R30_H,
R31, R31_H
);
// Class for all long integer registers (excluding zr)
reg_class any_reg %{
return _ANY_REG_mask;
%}
// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
R0, // zr
R1, // ra
R2, // sp
R3, // gp
R4, // tp
R23 // java thread
);
// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
R0, R0_H, // zr
R1, R1_H, // ra
R2, R2_H, // sp
R3, R3_H, // gp
R4, R4_H, // tp
R23, R23_H // java thread
);
reg_class no_special_reg32 %{
return _NO_SPECIAL_REG32_mask;
%}
reg_class no_special_reg %{
return _NO_SPECIAL_REG_mask;
%}
reg_class ptr_reg %{
return _PTR_REG_mask;
%}
reg_class no_special_ptr_reg %{
return _NO_SPECIAL_PTR_REG_mask;
%}
// Class for 64 bit register r10
reg_class r10_reg(
R10, R10_H
);
// Class for 64 bit register r11
reg_class r11_reg(
R11, R11_H
);
// Class for 64 bit register r12
reg_class r12_reg(
R12, R12_H
);
// Class for 64 bit register r13
reg_class r13_reg(
R13, R13_H
);
// Class for 64 bit register r14
reg_class r14_reg(
R14, R14_H
);
// Class for 64 bit register r15
reg_class r15_reg(
R15, R15_H
);
// Class for 64 bit register r16
reg_class r16_reg(
R16, R16_H
);
// Class for method register
reg_class method_reg(
R31, R31_H
);
// Class for heapbase register
reg_class heapbase_reg(
R27, R27_H
);
// Class for java thread register
reg_class java_thread_reg(
R23, R23_H
);
reg_class r28_reg(
R28, R28_H
);
reg_class r29_reg(
R29, R29_H
);
reg_class r30_reg(
R30, R30_H
);
reg_class r31_reg(
R31, R31_H
);
// Class for zero register
reg_class zr_reg(
R0, R0_H
);
// Class for thread register
reg_class thread_reg(
R4, R4_H
);
// Class for frame pointer register
reg_class fp_reg(
R8, R8_H
);
// Class for link register
reg_class ra_reg(
R1, R1_H
);
// Class for long sp register
reg_class sp_reg(
R2, R2_H
);
// Class for all float registers
reg_class float_reg(
F0,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
F26,
F27,
F28,
F29,
F30,
F31
);
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
F0, F0_H,
F1, F1_H,
F2, F2_H,
F3, F3_H,
F4, F4_H,
F5, F5_H,
F6, F6_H,
F7, F7_H,
F8, F8_H,
F9, F9_H,
F10, F10_H,
F11, F11_H,
F12, F12_H,
F13, F13_H,
F14, F14_H,
F15, F15_H,
F16, F16_H,
F17, F17_H,
F18, F18_H,
F19, F19_H,
F20, F20_H,
F21, F21_H,
F22, F22_H,
F23, F23_H,
F24, F24_H,
F25, F25_H,
F26, F26_H,
F27, F27_H,
F28, F28_H,
F29, F29_H,
F30, F30_H,
F31, F31_H
);
// Class for all RVV vector registers
reg_class vectora_reg(
V1, V1_H, V1_J, V1_K,
V2, V2_H, V2_J, V2_K,
V3, V3_H, V3_J, V3_K,
V4, V4_H, V4_J, V4_K,
V5, V5_H, V5_J, V5_K,
V6, V6_H, V6_J, V6_K,
V7, V7_H, V7_J, V7_K,
V8, V8_H, V8_J, V8_K,
V9, V9_H, V9_J, V9_K,
V10, V10_H, V10_J, V10_K,
V11, V11_H, V11_J, V11_K,
V12, V12_H, V12_J, V12_K,
V13, V13_H, V13_J, V13_K,
V14, V14_H, V14_J, V14_K,
V15, V15_H, V15_J, V15_K,
V16, V16_H, V16_J, V16_K,
V17, V17_H, V17_J, V17_K,
V18, V18_H, V18_J, V18_K,
V19, V19_H, V19_J, V19_K,
V20, V20_H, V20_J, V20_K,
V21, V21_H, V21_J, V21_K,
V22, V22_H, V22_J, V22_K,
V23, V23_H, V23_J, V23_K,
V24, V24_H, V24_J, V24_K,
V25, V25_H, V25_J, V25_K,
V26, V26_H, V26_J, V26_K,
V27, V27_H, V27_J, V27_K,
V28, V28_H, V28_J, V28_K,
V29, V29_H, V29_J, V29_K,
V30, V30_H, V30_J, V30_K,
V31, V31_H, V31_J, V31_K
);
// Class for 64 bit register f0
reg_class f0_reg(
F0, F0_H
);
// Class for 64 bit register f1
reg_class f1_reg(
F1, F1_H
);
// Class for 64 bit register f2
reg_class f2_reg(
F2, F2_H
);
// Class for 64 bit register f3
reg_class f3_reg(
F3, F3_H
);
// class for vector register v1
reg_class v1_reg(
V1, V1_H, V1_J, V1_K
);
// class for vector register v2
reg_class v2_reg(
V2, V2_H, V2_J, V2_K
);
// class for vector register v3
reg_class v3_reg(
V3, V3_H, V3_J, V3_K
);
// class for vector register v4
reg_class v4_reg(
V4, V4_H, V4_J, V4_K
);
// class for vector register v5
reg_class v5_reg(
V5, V5_H, V5_J, V5_K
);
// class for condition codes
reg_class reg_flags(RFLAGS);
%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
// int_def <name> ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
// #define <name> (<expression>)
// // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
// assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. Huge cost appears to be a way of saying don't do
// something.
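// For example, given the int_def format described above, the entry
// "int_def LOAD_COST (300, 3 * DEFAULT_COST)" below yields
// "#define LOAD_COST (3 * DEFAULT_COST)" in ad_<arch>.hpp plus an
// adlc_verification() assert that the expression evaluates to 300.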
definitions %{
// The default cost (of a register move instruction).
int_def DEFAULT_COST ( 100, 100);
int_def ALU_COST ( 100, 1 * DEFAULT_COST); // unknown, const, arith, shift, slt,
// multi, auipc, nop, logical, move
int_def LOAD_COST ( 300, 3 * DEFAULT_COST); // load, fpload
int_def STORE_COST ( 100, 1 * DEFAULT_COST); // store, fpstore
int_def XFER_COST ( 300, 3 * DEFAULT_COST); // mfc, mtc, fcvt, fmove, fcmp
int_def BRANCH_COST ( 100, 1 * DEFAULT_COST); // branch, jmp, call
int_def IMUL_COST ( 1000, 10 * DEFAULT_COST); // imul
int_def IDIVSI_COST ( 3400, 34 * DEFAULT_COST); // idivsi
int_def IDIVDI_COST ( 6600, 66 * DEFAULT_COST); // idivdi
int_def FMUL_SINGLE_COST ( 500, 5 * DEFAULT_COST); // fadd, fmul, fmadd
int_def FMUL_DOUBLE_COST ( 700, 7 * DEFAULT_COST); // fadd, fmul, fmadd
int_def FDIV_COST ( 2000, 20 * DEFAULT_COST); // fdiv
int_def FSQRT_COST ( 2500, 25 * DEFAULT_COST); // fsqrt
int_def VOLATILE_REF_COST ( 1000, 10 * DEFAULT_COST);
%}
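// A minimal, hypothetical sketch of how these cost symbols are consumed by the
// instruct rules later in the full file (the rule and operand names below are
// placeholders for illustration, not the actual definitions):
//
//   instruct sketch_addI(iRegINoSp dst, iRegI src1, iRegI src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(ALU_COST);   // register-register ALU op: cheap
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}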
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "runtime/objectMonitor.hpp"
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
class CallStubImpl {
//--------------------------------------------------------------
//---< Used for optimization in Compile::shorten_branches >---
//--------------------------------------------------------------
public:
// Size of call trampoline stub.
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
}
};
class HandlerImpl {
public:
static int emit_exception_handler(CodeBuffer &cbuf);
static int emit_deopt_handler(CodeBuffer& cbuf);
static uint size_exception_handler() {
return MacroAssembler::far_branch_size();
}
static uint size_deopt_handler() {
// count auipc + far branch
return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
}
};
class Node::PD {
public:
enum NodeFlags {
_last_flag = Node::_last_flag
};
};
bool is_CAS(int opcode, bool maybe_volatile);
// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_reserved(const Node *load);
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
%}
source %{
// Derived RegMask with conditionally allocatable registers
RegMask _ANY_REG32_mask;
RegMask _ANY_REG_mask;
RegMask _PTR_REG_mask;
RegMask _NO_SPECIAL_REG32_mask;
RegMask _NO_SPECIAL_REG_mask;
RegMask _NO_SPECIAL_PTR_REG_mask;
void reg_mask_init() {
_ANY_REG32_mask = _ALL_REG32_mask;
_ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));
_ANY_REG_mask = _ALL_REG_mask;
_ANY_REG_mask.SUBTRACT(_ZR_REG_mask);
_PTR_REG_mask = _ALL_REG_mask;
_PTR_REG_mask.SUBTRACT(_ZR_REG_mask);
_NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
_NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
_NO_SPECIAL_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
_NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
// x27 is not allocatable when compressed oops is on
if (UseCompressedOops) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
_NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
}
// x8 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
_NO_SPECIAL_REG_mask.SUBTRACT(_FP_REG_mask);
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_FP_REG_mask);
}
}
void PhaseOutput::pd_perform_mach_node_analysis() {
}
int MachNode::pd_alignment_required() const {
return 1;
}
int MachNode::compute_padding(int current_offset) const {
return 0;
}
// is_CAS(int opcode, bool maybe_volatile)
//
// return true if opcode is one of the possible CompareAndSwapX
// values, otherwise false.
bool is_CAS(int opcode, bool maybe_volatile)
{
switch (opcode) {
// We handle these
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_ShenandoahCompareAndSwapP:
case Op_ShenandoahCompareAndSwapN:
case Op_CompareAndSwapB:
case Op_CompareAndSwapS:
case Op_GetAndSetI:
case Op_GetAndSetL:
case Op_GetAndSetP:
case Op_GetAndSetN:
case Op_GetAndAddI:
case Op_GetAndAddL:
return true;
case Op_CompareAndExchangeI:
case Op_CompareAndExchangeN:
case Op_CompareAndExchangeB:
case Op_CompareAndExchangeS:
case Op_CompareAndExchangeL:
case Op_CompareAndExchangeP:
case Op_WeakCompareAndSwapB:
case Op_WeakCompareAndSwapS:
case Op_WeakCompareAndSwapI:
case Op_WeakCompareAndSwapL:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahWeakCompareAndSwapN:
case Op_ShenandoahCompareAndExchangeP:
case Op_ShenandoahCompareAndExchangeN:
return maybe_volatile;
default:
return false;
}
}
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load, otherwise false
bool needs_acquiring_load_reserved(const Node *n)
{
assert(n != NULL && is_CAS(n->Opcode(), true), "expecting a compare and swap");
LoadStoreNode* ldst = n->as_LoadStore();
if (n != NULL && is_CAS(n->Opcode(), false)) {
assert(ldst != NULL && ldst->trailing_membar() != NULL, "expected trailing membar");
} else {
return ldst != NULL && ldst->trailing_membar() != NULL;
}
// so we can just return true here
return true;
}
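// Usage sketch (the instruct rules themselves are not part of this excerpt): the
// CompareAndSwap/GetAndSet rules typically guard their acquiring encodings with
//   predicate(needs_acquiring_load_reserved(n));
// and the plain encodings with its negation, selecting between the relaxed and
// acquire variants of the emitted atomic sequence.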
#define __ _masm.
// Forward declarations of helper functions to convert register
// indices to register objects.
// The ad file has to provide implementations of certain methods
// expected by the generic code.
//
// REQUIRED FUNCTIONALITY
//=============================================================================
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset()
{
// jal
return 1 * NativeInstruction::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset()
{
return 7 * NativeInstruction::instruction_size; // movptr (6 instructions) + jal
}
int MachCallRuntimeNode::ret_addr_offset() {
// for generated stubs the call will be
// jal(addr)
// or with far branches
// jal(trampoline_stub)
// for real runtime callouts it will be 11 instructions
// see riscv_enc_java_to_runtime
// la(t1, retaddr) -> auipc + addi
// la(t0, RuntimeAddress(addr)) -> lui + addi + slli + addi + slli + addi
// addi(sp, sp, -2 * wordSize) -> addi
// sd(t1, Address(sp, wordSize)) -> sd
// jalr(t0) -> jalr
CodeBlob *cb = CodeCache::find_blob(_entry_point);
if (cb != NULL) {
return 1 * NativeInstruction::instruction_size;
} else {
return 11 * NativeInstruction::instruction_size;
}
}
//
// Compute padding required for nodes which need alignment
//
// With RVC a call instruction may get 2-byte aligned.
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
// to make sure the address of jal is 4-byte aligned.
return align_up(current_offset, alignment_required()) - current_offset;
}
// With RVC a call instruction may get 2-byte aligned.
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
// skip the movptr in MacroAssembler::ic_call():
// lui + addi + slli + addi + slli + addi
// Though movptr() is already 4-byte aligned with or without RVC,
// we guard against further changes by explicitly accounting for its size.
const int movptr_size = 6 * NativeInstruction::instruction_size;
current_offset += movptr_size;
// to make sure the address of jal is 4-byte aligned.
return align_up(current_offset, alignment_required()) - current_offset;
}
//=============================================================================
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
assert_cond(st != NULL);
st->print("BREAKPOINT");
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
__ ebreak();
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
//=============================================================================
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
C2_MacroAssembler _masm(&cbuf);
Assembler::CompressibleRegion cr(&_masm); // nops shall be 2-byte under RVC for alignment purposes.
for (int i = 0; i < _count; i++) {
__ nop();
}
}
uint MachNopNode::size(PhaseRegAlloc*) const {
return _count * (UseRVC ? NativeInstruction::compressed_instruction_size : NativeInstruction::instruction_size);
}
//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
return 0;
}
#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
assert_cond(st != NULL);
st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
#ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
assert_cond(st != NULL && ra_ != NULL);
Compile* C = ra_->C;
int framesize = C->output()->frame_slots() << LogBytesPerInt;
if (C->output()->need_stack_bang(framesize)) {
st->print("# stack bang size=%d\n\t", framesize);
}
st->print("sd fp, [sp, #%d]\n\t", - 2 * wordSize);
st->print("sd ra, [sp, #%d]\n\t", - wordSize);
if (PreserveFramePointer) { st->print("sub fp, sp, #%d\n\t", 2 * wordSize); }
st->print("sub sp, sp, #%d\n\t", framesize);
if (C->stub_function() == NULL && BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
st->print("ld t0, [guard]\n\t");
st->print("membar LoadLoad\n\t");
st->print("ld t1, [xthread, #thread_disarmed_offset]\n\t");
st->print("beq t0, t1, skip\n\t");
st->print("jalr #nmethod_entry_barrier_stub\n\t");
st->print("j skip\n\t");
st->print("guard: int\n\t");
st->print("skip:\n\t");
}
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != NULL);
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and fp
const int framesize = C->output()->frame_size_in_bytes();
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
{
Assembler::IncompressibleRegion ir(&_masm); // keep the nop as 4 bytes for patching.
MacroAssembler::assert_alignment(__ pc());
__ nop(); // 4 bytes
}
assert_cond(C != NULL);
if (C->clinit_barrier_on_entry()) {
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
Label L_skip_barrier;
__ mov_metadata(t1, C->method()->holder()->constant_encoding());
__ clinit_barrier(t1, t0, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
int bangsize = C->output()->bang_size_in_bytes();
if (C->output()->need_stack_bang(bangsize)) {
__ generate_stack_overflow_check(bangsize);
}
__ build_frame(framesize);
if (C->stub_function() == NULL) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
// Dummy labels for just measuring the code size
Label dummy_slow_path;
Label dummy_continuation;
Label dummy_guard;
Label* slow_path = &dummy_slow_path;
Label* continuation = &dummy_continuation;
Label* guard = &dummy_guard;
if (!Compile::current()->output()->in_scratch_emit_size()) {
// Use real labels from actual stub when not emitting code for purpose of measuring its size
C2EntryBarrierStub* stub = Compile::current()->output()->entry_barrier_table()->add_entry_barrier();
slow_path = &stub->slow_path();
continuation = &stub->continuation();
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
}
}
if (VerifyStackAtCalls) {
Unimplemented();
}
C->output()->set_frame_complete(cbuf.insts_size());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
ConstantTable& constant_table = C->output()->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
assert_cond(ra_ != NULL);
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
int MachPrologNode::reloc() const
{
return 0;
}
//=============================================================================
#ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
assert_cond(st != NULL && ra_ != NULL);
Compile* C = ra_->C;
assert_cond(C != NULL);
int framesize = C->output()->frame_size_in_bytes();
st->print("# pop frame %d\n\t", framesize);
if (framesize == 0) {
st->print("ld ra, [sp,#%d]\n\t", (2 * wordSize));
st->print("ld fp, [sp,#%d]\n\t", (3 * wordSize));
st->print("add sp, sp, #%d\n\t", (2 * wordSize));
} else {
st->print("add sp, sp, #%d\n\t", framesize);
st->print("ld ra, [sp,#%d]\n\t", - 2 * wordSize);
st->print("ld fp, [sp,#%d]\n\t", - wordSize);
}
if (do_polling() && C->is_method_compilation()) {
st->print("# test polling word\n\t");
st->print("ld t0, [xthread,#%d]\n\t", in_bytes(JavaThread::polling_word_offset()));
st->print("bgtu sp, t0, #slow_path");
}
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != NULL);
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
assert_cond(C != NULL);
int framesize = C->output()->frame_size_in_bytes();
__ remove_frame(framesize);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check();
}
if (do_polling() && C->is_method_compilation()) {
Label dummy_label;
Label* code_stub = &dummy_label;
if (!C->output()->in_scratch_emit_size()) {
code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
}
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
}
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
assert_cond(ra_ != NULL);
// Variable size. Determine dynamically.
return MachNode::size(ra_);
}
int MachEpilogNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // 1 for polling page.
}
const Pipeline * MachEpilogNode::pipeline() const {
return MachNode::pipeline_class();
}
//=============================================================================
// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_vector, rc_stack };
static enum RC rc_class(OptoReg::Name reg) {
if (reg == OptoReg::Bad) {
return rc_bad;
}
// we have 30 int registers * 2 halves
// (t0 and t1 are omitted)
int slots_of_int_registers = Register::max_slots_per_register * (Register::number_of_registers - 2);
if (reg < slots_of_int_registers) {
return rc_int;
}
// we have 32 float registers * 2 halves
int slots_of_float_registers = FloatRegister::max_slots_per_register * FloatRegister::number_of_registers;
if (reg < slots_of_int_registers + slots_of_float_registers) {
return rc_float;
}
// we have 32 vector registers * 4 slots
int slots_of_vector_registers = VectorRegister::max_slots_per_register * VectorRegister::number_of_registers;
if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_vector_registers) {
return rc_vector;
}
// Between vector regs & stack is the flags regs.
assert(OptoReg::is_stack(reg), "blow up if spilling flags");
return rc_stack;
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
assert_cond(ra_ != NULL);
Compile* C = ra_->C;
// Get registers to move.
OptoReg::Name src_hi = ra_->get_reg_second(in(1));
OptoReg::Name src_lo = ra_->get_reg_first(in(1));
OptoReg::Name dst_hi = ra_->get_reg_second(this);
OptoReg::Name dst_lo = ra_->get_reg_first(this);
enum RC src_hi_rc = rc_class(src_hi);
enum RC src_lo_rc = rc_class(src_lo);
enum RC dst_hi_rc = rc_class(dst_hi);
enum RC dst_lo_rc = rc_class(dst_lo);
assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
if (src_hi != OptoReg::Bad) {
assert((src_lo & 1) == 0 && src_lo + 1 == src_hi &&
(dst_lo & 1) == 0 && dst_lo + 1 == dst_hi,
"expected aligned-adjacent pairs");
}
if (src_lo == dst_lo && src_hi == dst_hi) {
return 0; // Self copy, no move.
}
bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
(dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (bottom_type()->isa_vect() != NULL) {
uint ireg = ideal_reg();
if (ireg == Op_VecA && cbuf) {
C2_MacroAssembler _masm(cbuf);
int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack to stack
__ spill_copy_vector_stack_to_stack(src_offset, dst_offset,
vector_reg_size_in_bytes);
} else if (src_lo_rc == rc_vector && dst_lo_rc == rc_stack) {
// vpr to stack
__ spill(as_VectorRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo));
} else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vector) {
// stack to vpr
__ unspill(as_VectorRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo));
} else if (src_lo_rc == rc_vector && dst_lo_rc == rc_vector) {
// vpr to vpr
__ vmv1r_v(as_VectorRegister(Matcher::_regEncode[dst_lo]), as_VectorRegister(Matcher::_regEncode[src_lo]));
} else {
ShouldNotReachHere();
}
}
} else if (cbuf != NULL) {
C2_MacroAssembler _masm(cbuf);
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
if (!is64 && this->ideal_reg() != Op_RegI) { // zero extended for narrow oop or klass
__ zero_extend(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]), 32);
} else {
__ mv(as_Register(Matcher::_regEncode[dst_lo]), as_Register(Matcher::_regEncode[src_lo]));
}
} else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
if (is64) {
__ fmv_d_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
} else {
__ fmv_w_x(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
}
} else { // gpr --> stack spill
assert(dst_lo_rc == rc_stack, "spill to bad register class");
__ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
}
break;
case rc_float:
if (dst_lo_rc == rc_int) { // fpr --> gpr copy
if (is64) {
__ fmv_x_d(as_Register(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else {
__ fmv_x_w(as_Register(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
}
} else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
if (is64) {
__ fmv_d(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else {
__ fmv_s(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
}
} else { // fpr --> stack spill
assert(dst_lo_rc == rc_stack, "spill to bad register class");
__ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
is64, dst_offset);
}
break;
case rc_stack:
if (dst_lo_rc == rc_int) { // stack --> gpr load
if (this->ideal_reg() == Op_RegI) {
__ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
} else { // zero extended for narrow oop or klass
__ unspillu(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
}
} else if (dst_lo_rc == rc_float) { // stack --> fpr load
__ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
is64, src_offset);
} else { // stack --> stack copy
assert(dst_lo_rc == rc_stack, "spill to bad register class");
if (this->ideal_reg() == Op_RegI) {
__ unspill(t0, is64, src_offset);
} else { // zero extended for narrow oop or klass
__ unspillu(t0, is64, src_offset);
}
__ spill(t0, is64, dst_offset);
}
break;
default:
ShouldNotReachHere();
}
}
if (st != NULL) {
st->print("spill ");
if (src_lo_rc == rc_stack) {
st->print("[sp, #%d] -> ", src_offset);
} else {
st->print("%s -> ", Matcher::regName[src_lo]);
}
if (dst_lo_rc == rc_stack) {
st->print("[sp, #%d]", dst_offset);
} else {
st->print("%s", Matcher::regName[dst_lo]);
}
if (bottom_type()->isa_vect() != NULL) {
int vsize = 0;
if (ideal_reg() == Op_VecA) {
vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
} else {
ShouldNotReachHere();
}
st->print("\t# vector spill size = %d", vsize);
} else {
st->print("\t# spill size = %d", is64 ? 64 : 32);
}
}
return 0;
}
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
if (ra_ == NULL) {
st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
} else {
implementation(NULL, ra_, false, st);
}
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, NULL);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
//=============================================================================
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
assert_cond(ra_ != NULL && st != NULL);
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_reg_first(this);
st->print("add %s, sp, #%d\t# box lock",
Matcher::regName[reg], offset);
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see BoxLockNode::size()
assert_cond(ra_ != NULL);
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
if (is_imm_in_range(offset, 12, 0)) {
__ addi(as_Register(reg), sp, offset);
} else if (is_imm_in_range(offset, 32, 0)) {
__ li32(t0, offset);
__ add(as_Register(reg), sp, t0);
} else {
ShouldNotReachHere();
}
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
// BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
if (is_imm_in_range(offset, 12, 0)) {
return NativeInstruction::instruction_size;
} else {
return 3 * NativeInstruction::instruction_size; // lui + addiw + add;
}
}
//=============================================================================
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
assert_cond(st != NULL);
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (CompressedKlassPointers::shift() != 0) {
st->print_cr("\tdecode_klass_not_null t0, t0");
}
} else {
st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
}
st->print_cr("\tbeq t0, t1, ic_hit");
st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
st->print_cr("\tic_hit:");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
Label skip;
__ cmp_klass(j_rarg0, t1, t0, t2 /* call-clobbered t2 as a tmp */, skip);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(skip);
// These NOPs are critical so that the verified entry point is properly
// 4-byte aligned for patching by NativeJump::patch_verified_entry()
__ align(NativeInstruction::instruction_size);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
assert_cond(ra_ != NULL);
return MachNode::size(ra_);
}
// REQUIRED EMIT CODE
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
{
// la_patchable t0, #exception_blob_entry_point
// jr (offset)t0
// or
// j #exception_blob_entry_point
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ auipc(ra, 0);
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return offset;
}
// REQUIRED MATCHER CODE
//=============================================================================
const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode)) {
return false;
}
switch (opcode) {
case Op_CacheWB: // fall through
case Op_CacheWBPreSync: // fall through
case Op_CacheWBPostSync:
if (!VM_Version::supports_data_cache_line_flush()) {
return false;
}
break;
case Op_StrCompressedCopy: // fall through
case Op_StrInflatedCopy: // fall through
case Op_CountPositives:
return UseRVV;
case Op_EncodeISOArray:
return UseRVV && SpecialEncodeISOArray;
case Op_PopCountI:
case Op_PopCountL:
return UsePopCountInstruction;
case Op_RotateRight:
case Op_RotateLeft:
case Op_CountLeadingZerosI:
case Op_CountLeadingZerosL:
case Op_CountTrailingZerosI:
case Op_CountTrailingZerosL:
return UseZbb;
}
return true; // Per default match rules are supported.
}
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
return match_rule_supported_vector(opcode, vlen, bt);
}
// Identify extra cases that we might want to provide match rules for vector nodes and
// other intrinsics guarded with vector length (vlen) and element type (bt).
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
return false;
}
return op_vec_supported(opcode);
}
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
return false;
}
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
return false;
}
const RegMask* Matcher::predicate_reg_mask(void) {
return NULL;
}
const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
return NULL;
}
// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
return false;
}
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
Unimplemented();
return OptoRegPair(0, 0);
}
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
// |---label(L1)-----|
// |-----------------|
// |-----------------|----------eq: float-------------------
// |-----------------| // far_cmpD_branch | cmpD_branch
// |------- ---------| feq; | feq;
// |-far_cmpD_branch-| beqz done; | bnez L;
// |-----------------| j L; |
// |-----------------| bind(done); |
// |-----------------|--------------------------------------
// |-----------------| // so shortBrSize = br_size - 4;
// |-----------------| // so offs = offset - shortBrSize + 4;
// |---label(L2)-----|
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
// The passed offset is relative to address of the branch.
int shortBrSize = br_size - 4;
int offs = offset - shortBrSize + 4;
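// The check below corresponds to the reach of a RISC-V conditional branch (B-type),
// whose 13-bit signed, 2-byte-scaled immediate spans roughly +/-4 KiB.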
return (-4096 <= offs && offs < 4096);
}
// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
if (UseRVV) {
// The MaxVectorSize should have been set by detecting the RVV max vector register size when checking UseRVV.
// MaxVectorSize == VM_Version::_initial_vector_length
return MaxVectorSize;
}
return 0;
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
return vector_width_in_bytes(bt) / type2aelembytes(bt);
}