// Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2022 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
//
// PPC64 Architecture Description File
//
//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name (register save type, C convention save type,
// ideal register type, encoding);
//
// Register Save Types:
//
// NS = No-Save: The register allocator assumes that these registers
// can be used without saving upon entry to the method, &
// that they do not need to be saved at call sites.
//
// SOC = Save-On-Call: The register allocator assumes that these registers
// can be used without saving upon entry to the method,
// but that they must be saved at call sites.
// These are called "volatiles" on ppc.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, but they do not need to be saved at call
// sites.
// These are called "nonvolatiles" on ppc.
//
// AS = Always-Save: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
//
// PPC64 register definitions, based on the 64-bit PowerPC ELF ABI
// Supplement Version 1.7 as of 2003-10-29.
//
// For each 64-bit register we must define two registers: the register
// itself, e.g. R3, and a corresponding virtual other (32-bit-)'half',
// e.g. R3_H, which is needed by the allocator, but is not used
// for stores, loads, etc.
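// For illustration: in the reg_def lines below, R3 carries encoding 3, i.e. the
// hardware register number placed into instruction fields, while the virtual
// _H halves get the dummy encoding 99 since they are never emitted directly.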
// ----------------------------
// Integer/Long Registers
// ----------------------------
// PPC64 has 32 64-bit integer registers.
// types: v = volatile, nv = non-volatile, s = system
reg_def R0 ( SOC, SOC, Op_RegI, 0, R0->as_VMReg() ); // v used in prologs
reg_def R0_H ( SOC, SOC, Op_RegI, 99, R0->as_VMReg()->next() );
reg_def R1 ( NS, NS, Op_RegI, 1, R1->as_VMReg() ); // s SP
reg_def R1_H ( NS, NS, Op_RegI, 99, R1->as_VMReg()->next() );
reg_def R2 ( SOC, SOC, Op_RegI, 2, R2->as_VMReg() ); // v TOC
reg_def R2_H ( SOC, SOC, Op_RegI, 99, R2->as_VMReg()->next() );
reg_def R3 ( SOC, SOC, Op_RegI, 3, R3->as_VMReg() ); // v iarg1 & iret
reg_def R3_H ( SOC, SOC, Op_RegI, 99, R3->as_VMReg()->next() );
reg_def R4 ( SOC, SOC, Op_RegI, 4, R4->as_VMReg() ); // v iarg2
reg_def R4_H ( SOC, SOC, Op_RegI, 99, R4->as_VMReg()->next() );
reg_def R5 ( SOC, SOC, Op_RegI, 5, R5->as_VMReg() ); // v iarg3
reg_def R5_H ( SOC, SOC, Op_RegI, 99, R5->as_VMReg()->next() );
reg_def R6 ( SOC, SOC, Op_RegI, 6, R6->as_VMReg() ); // v iarg4
reg_def R6_H ( SOC, SOC, Op_RegI, 99, R6->as_VMReg()->next() );
reg_def R7 ( SOC, SOC, Op_RegI, 7, R7->as_VMReg() ); // v iarg5
reg_def R7_H ( SOC, SOC, Op_RegI, 99, R7->as_VMReg()->next() );
reg_def R8 ( SOC, SOC, Op_RegI, 8, R8->as_VMReg() ); // v iarg6
reg_def R8_H ( SOC, SOC, Op_RegI, 99, R8->as_VMReg()->next() );
reg_def R9 ( SOC, SOC, Op_RegI, 9, R9->as_VMReg() ); // v iarg7
reg_def R9_H ( SOC, SOC, Op_RegI, 99, R9->as_VMReg()->next() );
reg_def R10 ( SOC, SOC, Op_RegI, 10, R10->as_VMReg() ); // v iarg8
reg_def R10_H( SOC, SOC, Op_RegI, 99, R10->as_VMReg()->next());
reg_def R11 ( SOC, SOC, Op_RegI, 11, R11->as_VMReg() ); // v ENV / scratch
reg_def R11_H( SOC, SOC, Op_RegI, 99, R11->as_VMReg()->next());
reg_def R12 ( SOC, SOC, Op_RegI, 12, R12->as_VMReg() ); // v scratch
reg_def R12_H( SOC, SOC, Op_RegI, 99, R12->as_VMReg()->next());
reg_def R13 ( NS, NS, Op_RegI, 13, R13->as_VMReg() ); // s system thread id
reg_def R13_H( NS, NS, Op_RegI, 99, R13->as_VMReg()->next());
reg_def R14 ( SOC, SOE, Op_RegI, 14, R14->as_VMReg() ); // nv
reg_def R14_H( SOC, SOE, Op_RegI, 99, R14->as_VMReg()->next());
reg_def R15 ( SOC, SOE, Op_RegI, 15, R15->as_VMReg() ); // nv
reg_def R15_H( SOC, SOE, Op_RegI, 99, R15->as_VMReg()->next());
reg_def R16 ( SOC, SOE, Op_RegI, 16, R16->as_VMReg() ); // nv
reg_def R16_H( SOC, SOE, Op_RegI, 99, R16->as_VMReg()->next());
reg_def R17 ( SOC, SOE, Op_RegI, 17, R17->as_VMReg() ); // nv
reg_def R17_H( SOC, SOE, Op_RegI, 99, R17->as_VMReg()->next());
reg_def R18 ( SOC, SOE, Op_RegI, 18, R18->as_VMReg() ); // nv
reg_def R18_H( SOC, SOE, Op_RegI, 99, R18->as_VMReg()->next());
reg_def R19 ( SOC, SOE, Op_RegI, 19, R19->as_VMReg() ); // nv
reg_def R19_H( SOC, SOE, Op_RegI, 99, R19->as_VMReg()->next());
reg_def R20 ( SOC, SOE, Op_RegI, 20, R20->as_VMReg() ); // nv
reg_def R20_H( SOC, SOE, Op_RegI, 99, R20->as_VMReg()->next());
reg_def R21 ( SOC, SOE, Op_RegI, 21, R21->as_VMReg() ); // nv
reg_def R21_H( SOC, SOE, Op_RegI, 99, R21->as_VMReg()->next());
reg_def R22 ( SOC, SOE, Op_RegI, 22, R22->as_VMReg() ); // nv
reg_def R22_H( SOC, SOE, Op_RegI, 99, R22->as_VMReg()->next());
reg_def R23 ( SOC, SOE, Op_RegI, 23, R23->as_VMReg() ); // nv
reg_def R23_H( SOC, SOE, Op_RegI, 99, R23->as_VMReg()->next());
reg_def R24 ( SOC, SOE, Op_RegI, 24, R24->as_VMReg() ); // nv
reg_def R24_H( SOC, SOE, Op_RegI, 99, R24->as_VMReg()->next());
reg_def R25 ( SOC, SOE, Op_RegI, 25, R25->as_VMReg() ); // nv
reg_def R25_H( SOC, SOE, Op_RegI, 99, R25->as_VMReg()->next());
reg_def R26 ( SOC, SOE, Op_RegI, 26, R26->as_VMReg() ); // nv
reg_def R26_H( SOC, SOE, Op_RegI, 99, R26->as_VMReg()->next());
reg_def R27 ( SOC, SOE, Op_RegI, 27, R27->as_VMReg() ); // nv
reg_def R27_H( SOC, SOE, Op_RegI, 99, R27->as_VMReg()->next());
reg_def R28 ( SOC, SOE, Op_RegI, 28, R28->as_VMReg() ); // nv
reg_def R28_H( SOC, SOE, Op_RegI, 99, R28->as_VMReg()->next());
reg_def R29 ( SOC, SOE, Op_RegI, 29, R29->as_VMReg() ); // nv
reg_def R29_H( SOC, SOE, Op_RegI, 99, R29->as_VMReg()->next());
reg_def R30 ( SOC, SOE, Op_RegI, 30, R30->as_VMReg() ); // nv
reg_def R30_H( SOC, SOE, Op_RegI, 99, R30->as_VMReg()->next());
reg_def R31 ( SOC, SOE, Op_RegI, 31, R31->as_VMReg() ); // nv
reg_def R31_H( SOC, SOE, Op_RegI, 99, R31->as_VMReg()->next());
// ----------------------------
// Float/Double Registers
// ----------------------------
// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.
// PPC64 has 32 64-bit floating-point registers. Each can store a single
// or double precision floating-point value.
// types: v = volatile, nv = non-volatile, s = system
reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg() ); // v scratch
reg_def F0_H ( SOC, SOC, Op_RegF, 99, F0->as_VMReg()->next() );
reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg() ); // v farg1 & fret
reg_def F1_H ( SOC, SOC, Op_RegF, 99, F1->as_VMReg()->next() );
reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg() ); // v farg2
reg_def F2_H ( SOC, SOC, Op_RegF, 99, F2->as_VMReg()->next() );
reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg() ); // v farg3
reg_def F3_H ( SOC, SOC, Op_RegF, 99, F3->as_VMReg()->next() );
reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg() ); // v farg4
reg_def F4_H ( SOC, SOC, Op_RegF, 99, F4->as_VMReg()->next() );
reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg() ); // v farg5
reg_def F5_H ( SOC, SOC, Op_RegF, 99, F5->as_VMReg()->next() );
reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg() ); // v farg6
reg_def F6_H ( SOC, SOC, Op_RegF, 99, F6->as_VMReg()->next() );
reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg() ); // v farg7
reg_def F7_H ( SOC, SOC, Op_RegF, 99, F7->as_VMReg()->next() );
reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg() ); // v farg8
reg_def F8_H ( SOC, SOC, Op_RegF, 99, F8->as_VMReg()->next() );
reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg() ); // v farg9
reg_def F9_H ( SOC, SOC, Op_RegF, 99, F9->as_VMReg()->next() );
reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg() ); // v farg10
reg_def F10_H( SOC, SOC, Op_RegF, 99, F10->as_VMReg()->next());
reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg() ); // v farg11
reg_def F11_H( SOC, SOC, Op_RegF, 99, F11->as_VMReg()->next());
reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg() ); // v farg12
reg_def F12_H( SOC, SOC, Op_RegF, 99, F12->as_VMReg()->next());
reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg() ); // v farg13
reg_def F13_H( SOC, SOC, Op_RegF, 99, F13->as_VMReg()->next());
reg_def F14 ( SOC, SOE, Op_RegF, 14, F14->as_VMReg() ); // nv
reg_def F14_H( SOC, SOE, Op_RegF, 99, F14->as_VMReg()->next());
reg_def F15 ( SOC, SOE, Op_RegF, 15, F15->as_VMReg() ); // nv
reg_def F15_H( SOC, SOE, Op_RegF, 99, F15->as_VMReg()->next());
reg_def F16 ( SOC, SOE, Op_RegF, 16, F16->as_VMReg() ); // nv
reg_def F16_H( SOC, SOE, Op_RegF, 99, F16->as_VMReg()->next());
reg_def F17 ( SOC, SOE, Op_RegF, 17, F17->as_VMReg() ); // nv
reg_def F17_H( SOC, SOE, Op_RegF, 99, F17->as_VMReg()->next());
reg_def F18 ( SOC, SOE, Op_RegF, 18, F18->as_VMReg() ); // nv
reg_def F18_H( SOC, SOE, Op_RegF, 99, F18->as_VMReg()->next());
reg_def F19 ( SOC, SOE, Op_RegF, 19, F19->as_VMReg() ); // nv
reg_def F19_H( SOC, SOE, Op_RegF, 99, F19->as_VMReg()->next());
reg_def F20 ( SOC, SOE, Op_RegF, 20, F20->as_VMReg() ); // nv
reg_def F20_H( SOC, SOE, Op_RegF, 99, F20->as_VMReg()->next());
reg_def F21 ( SOC, SOE, Op_RegF, 21, F21->as_VMReg() ); // nv
reg_def F21_H( SOC, SOE, Op_RegF, 99, F21->as_VMReg()->next());
reg_def F22 ( SOC, SOE, Op_RegF, 22, F22->as_VMReg() ); // nv
reg_def F22_H( SOC, SOE, Op_RegF, 99, F22->as_VMReg()->next());
reg_def F23 ( SOC, SOE, Op_RegF, 23, F23->as_VMReg() ); // nv
reg_def F23_H( SOC, SOE, Op_RegF, 99, F23->as_VMReg()->next());
reg_def F24 ( SOC, SOE, Op_RegF, 24, F24->as_VMReg() ); // nv
reg_def F24_H( SOC, SOE, Op_RegF, 99, F24->as_VMReg()->next());
reg_def F25 ( SOC, SOE, Op_RegF, 25, F25->as_VMReg() ); // nv
reg_def F25_H( SOC, SOE, Op_RegF, 99, F25->as_VMReg()->next());
reg_def F26 ( SOC, SOE, Op_RegF, 26, F26->as_VMReg() ); // nv
reg_def F26_H( SOC, SOE, Op_RegF, 99, F26->as_VMReg()->next());
reg_def F27 ( SOC, SOE, Op_RegF, 27, F27->as_VMReg() ); // nv
reg_def F27_H( SOC, SOE, Op_RegF, 99, F27->as_VMReg()->next());
reg_def F28 ( SOC, SOE, Op_RegF, 28, F28->as_VMReg() ); // nv
reg_def F28_H( SOC, SOE, Op_RegF, 99, F28->as_VMReg()->next());
reg_def F29 ( SOC, SOE, Op_RegF, 29, F29->as_VMReg() ); // nv
reg_def F29_H( SOC, SOE, Op_RegF, 99, F29->as_VMReg()->next());
reg_def F30 ( SOC, SOE, Op_RegF, 30, F30->as_VMReg() ); // nv
reg_def F30_H( SOC, SOE, Op_RegF, 99, F30->as_VMReg()->next());
reg_def F31 ( SOC, SOE, Op_RegF, 31, F31->as_VMReg() ); // nv
reg_def F31_H( SOC, SOE, Op_RegF, 99, F31->as_VMReg()->next());
// ----------------------------
// Special Registers
// ----------------------------
// Condition Codes Flag Registers
// PPC64 has 8 condition code "registers" which are all contained
// in the CR register.
// types: v = volatile, nv = non-volatile, s = system
reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg()); // v
reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg()); // v
reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg()); // nv
reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg()); // nv
reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg()); // nv
reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg()); // v
reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg()); // v
reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg()); // v
// Special registers of PPC64
reg_def SR_XER( SOC, SOC, Op_RegP, 0, SR_XER->as_VMReg()); // v
reg_def SR_LR( SOC, SOC, Op_RegP, 1, SR_LR->as_VMReg()); // v
reg_def SR_CTR( SOC, SOC, Op_RegP, 2, SR_CTR->as_VMReg()); // v
reg_def SR_VRSAVE( SOC, SOC, Op_RegP, 3, SR_VRSAVE->as_VMReg()); // v
reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
reg_def SR_PPR( SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg()); // v
// ----------------------------
// Vector-Scalar Registers
// ----------------------------
// 1st 32 VSRs are aliases for the FPRs which are already defined above.
reg_def VSR0 ( SOC, SOC, Op_VecX, 0, VMRegImpl::Bad());
reg_def VSR1 ( SOC, SOC, Op_VecX, 1, VMRegImpl::Bad());
reg_def VSR2 ( SOC, SOC, Op_VecX, 2, VMRegImpl::Bad());
reg_def VSR3 ( SOC, SOC, Op_VecX, 3, VMRegImpl::Bad());
reg_def VSR4 ( SOC, SOC, Op_VecX, 4, VMRegImpl::Bad());
reg_def VSR5 ( SOC, SOC, Op_VecX, 5, VMRegImpl::Bad());
reg_def VSR6 ( SOC, SOC, Op_VecX, 6, VMRegImpl::Bad());
reg_def VSR7 ( SOC, SOC, Op_VecX, 7, VMRegImpl::Bad());
reg_def VSR8 ( SOC, SOC, Op_VecX, 8, VMRegImpl::Bad());
reg_def VSR9 ( SOC, SOC, Op_VecX, 9, VMRegImpl::Bad());
reg_def VSR10 ( SOC, SOC, Op_VecX, 10, VMRegImpl::Bad());
reg_def VSR11 ( SOC, SOC, Op_VecX, 11, VMRegImpl::Bad());
reg_def VSR12 ( SOC, SOC, Op_VecX, 12, VMRegImpl::Bad());
reg_def VSR13 ( SOC, SOC, Op_VecX, 13, VMRegImpl::Bad());
reg_def VSR14 ( SOC, SOE, Op_VecX, 14, VMRegImpl::Bad());
reg_def VSR15 ( SOC, SOE, Op_VecX, 15, VMRegImpl::Bad());
reg_def VSR16 ( SOC, SOE, Op_VecX, 16, VMRegImpl::Bad());
reg_def VSR17 ( SOC, SOE, Op_VecX, 17, VMRegImpl::Bad());
reg_def VSR18 ( SOC, SOE, Op_VecX, 18, VMRegImpl::Bad());
reg_def VSR19 ( SOC, SOE, Op_VecX, 19, VMRegImpl::Bad());
reg_def VSR20 ( SOC, SOE, Op_VecX, 20, VMRegImpl::Bad());
reg_def VSR21 ( SOC, SOE, Op_VecX, 21, VMRegImpl::Bad());
reg_def VSR22 ( SOC, SOE, Op_VecX, 22, VMRegImpl::Bad());
reg_def VSR23 ( SOC, SOE, Op_VecX, 23, VMRegImpl::Bad());
reg_def VSR24 ( SOC, SOE, Op_VecX, 24, VMRegImpl::Bad());
reg_def VSR25 ( SOC, SOE, Op_VecX, 25, VMRegImpl::Bad());
reg_def VSR26 ( SOC, SOE, Op_VecX, 26, VMRegImpl::Bad());
reg_def VSR27 ( SOC, SOE, Op_VecX, 27, VMRegImpl::Bad());
reg_def VSR28 ( SOC, SOE, Op_VecX, 28, VMRegImpl::Bad());
reg_def VSR29 ( SOC, SOE, Op_VecX, 29, VMRegImpl::Bad());
reg_def VSR30 ( SOC, SOE, Op_VecX, 30, VMRegImpl::Bad());
reg_def VSR31 ( SOC, SOE, Op_VecX, 31, VMRegImpl::Bad());
// 2nd 32 VSRs are aliases for the VRs which are only defined here.
reg_def VSR32 ( SOC, SOC, Op_VecX, 32, VSR32->as_VMReg());
reg_def VSR33 ( SOC, SOC, Op_VecX, 33, VSR33->as_VMReg());
reg_def VSR34 ( SOC, SOC, Op_VecX, 34, VSR34->as_VMReg());
reg_def VSR35 ( SOC, SOC, Op_VecX, 35, VSR35->as_VMReg());
reg_def VSR36 ( SOC, SOC, Op_VecX, 36, VSR36->as_VMReg());
reg_def VSR37 ( SOC, SOC, Op_VecX, 37, VSR37->as_VMReg());
reg_def VSR38 ( SOC, SOC, Op_VecX, 38, VSR38->as_VMReg());
reg_def VSR39 ( SOC, SOC, Op_VecX, 39, VSR39->as_VMReg());
reg_def VSR40 ( SOC, SOC, Op_VecX, 40, VSR40->as_VMReg());
reg_def VSR41 ( SOC, SOC, Op_VecX, 41, VSR41->as_VMReg());
reg_def VSR42 ( SOC, SOC, Op_VecX, 42, VSR42->as_VMReg());
reg_def VSR43 ( SOC, SOC, Op_VecX, 43, VSR43->as_VMReg());
reg_def VSR44 ( SOC, SOC, Op_VecX, 44, VSR44->as_VMReg());
reg_def VSR45 ( SOC, SOC, Op_VecX, 45, VSR45->as_VMReg());
reg_def VSR46 ( SOC, SOC, Op_VecX, 46, VSR46->as_VMReg());
reg_def VSR47 ( SOC, SOC, Op_VecX, 47, VSR47->as_VMReg());
reg_def VSR48 ( SOC, SOC, Op_VecX, 48, VSR48->as_VMReg());
reg_def VSR49 ( SOC, SOC, Op_VecX, 49, VSR49->as_VMReg());
reg_def VSR50 ( SOC, SOC, Op_VecX, 50, VSR50->as_VMReg());
reg_def VSR51 ( SOC, SOC, Op_VecX, 51, VSR51->as_VMReg());
reg_def VSR52 ( SOC, SOE, Op_VecX, 52, VSR52->as_VMReg());
reg_def VSR53 ( SOC, SOE, Op_VecX, 53, VSR53->as_VMReg());
reg_def VSR54 ( SOC, SOE, Op_VecX, 54, VSR54->as_VMReg());
reg_def VSR55 ( SOC, SOE, Op_VecX, 55, VSR55->as_VMReg());
reg_def VSR56 ( SOC, SOE, Op_VecX, 56, VSR56->as_VMReg());
reg_def VSR57 ( SOC, SOE, Op_VecX, 57, VSR57->as_VMReg());
reg_def VSR58 ( SOC, SOE, Op_VecX, 58, VSR58->as_VMReg());
reg_def VSR59 ( SOC, SOE, Op_VecX, 59, VSR59->as_VMReg());
reg_def VSR60 ( SOC, SOE, Op_VecX, 60, VSR60->as_VMReg());
reg_def VSR61 ( SOC, SOE, Op_VecX, 61, VSR61->as_VMReg());
reg_def VSR62 ( SOC, SOE, Op_VecX, 62, VSR62->as_VMReg());
reg_def VSR63 ( SOC, SOE, Op_VecX, 63, VSR63->as_VMReg());
// ----------------------------
// Specify priority of register selection within phases of register
// allocation. Highest priority is first. A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry. Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.
// It's worth about 1% on SPEC geomean to get this right.
// Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration
// in adGlobals_ppc.hpp which defines the <register>_num values, e.g.
// R3_num. Therefore, R3_num may not be (and in reality is not)
// the same as R3->encoding()! Furthermore, we cannot make any
// assumptions on ordering, e.g. R3_num may be less than R2_num.
// Additionally, the function
// static enum RC rc_class(OptoReg::Name reg )
// maps a given <register>_num value to its chunk type (except for flags)
// and its current implementation relies on chunk0 and chunk1 having a
// size of 64 each.
// If you change this allocation class, please have a look at the
// default values for the parameters RoundRobinIntegerRegIntervalStart
// and RoundRobinFloatRegIntervalStart
alloc_class chunk0 (
// Chunk0 contains *all* 64 integer register halves.
// "non-volatile" registers
R14, R14_H,
R15, R15_H,
R17, R17_H,
R18, R18_H,
R19, R19_H,
R20, R20_H,
R21, R21_H,
R22, R22_H,
R23, R23_H,
R24, R24_H,
R25, R25_H,
R26, R26_H,
R27, R27_H,
R28, R28_H,
R29, R29_H,
R30, R30_H,
R31, R31_H,
// scratch/special registers
R11, R11_H,
R12, R12_H,
// argument registers
R10, R10_H,
R9, R9_H,
R8, R8_H,
R7, R7_H,
R6, R6_H,
R5, R5_H,
R4, R4_H,
R3, R3_H,
// special registers, not available for allocation
R16, R16_H, // R16_thread
R13, R13_H, // system thread id
R2, R2_H, // may be used for TOC
R1, R1_H, // SP
R0, R0_H // R0 (scratch)
);
// If you change this allocation class, please have a look at the
// default values for the parameters RoundRobinIntegerRegIntervalStart
// and RoundRobinFloatRegIntervalStart
alloc_class chunk1 (
// Chunk1 contains *all* 64 floating-point register halves.
// scratch register
F0, F0_H,
// argument registers
F13, F13_H,
F12, F12_H,
F11, F11_H,
F10, F10_H,
F9, F9_H,
F8, F8_H,
F7, F7_H,
F6, F6_H,
F5, F5_H,
F4, F4_H,
F3, F3_H,
F2, F2_H,
F1, F1_H,
// non-volatile registers
F14, F14_H,
F15, F15_H,
F16, F16_H,
F17, F17_H,
F18, F18_H,
F19, F19_H,
F20, F20_H,
F21, F21_H,
F22, F22_H,
F23, F23_H,
F24, F24_H,
F25, F25_H,
F26, F26_H,
F27, F27_H,
F28, F28_H,
F29, F29_H,
F30, F30_H,
F31, F31_H
);
alloc_class chunk2 (
// Chunk2 contains *all* 8 condition code registers.
CCR0,
CCR1,
CCR2,
CCR3,
CCR4,
CCR5,
CCR6,
CCR7
);
alloc_class chunk3 (
VSR0,
VSR1,
VSR2,
VSR3,
VSR4,
VSR5,
VSR6,
VSR7,
VSR8,
VSR9,
VSR10,
VSR11,
VSR12,
VSR13,
VSR14,
VSR15,
VSR16,
VSR17,
VSR18,
VSR19,
VSR20,
VSR21,
VSR22,
VSR23,
VSR24,
VSR25,
VSR26,
VSR27,
VSR28,
VSR29,
VSR30,
VSR31,
VSR32,
VSR33,
VSR34,
VSR35,
VSR36,
VSR37,
VSR38,
VSR39,
VSR40,
VSR41,
VSR42,
VSR43,
VSR44,
VSR45,
VSR46,
VSR47,
VSR48,
VSR49,
VSR50,
VSR51,
VSR52,
VSR53,
VSR54,
VSR55,
VSR56,
VSR57,
VSR58,
VSR59,
VSR60,
VSR61,
VSR62,
VSR63
);
alloc_class chunk4 (
// special registers
// These registers are not allocated, but used for nodes generated by postalloc expand.
SR_XER,
SR_LR,
SR_CTR,
SR_VRSAVE,
SR_SPEFSCR,
SR_PPR
);
//-------Architecture Description Register Classes-----------------------
// Several register classes are automatically defined based upon
// information in this architecture description.
// 1) reg_class inline_cache_reg ( as defined in frame section )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
// ----------------------------
// 32 Bit Register Classes
// ----------------------------
// We specify registers twice, once as read/write, and once read-only.
// We use the read-only registers for source operands. With this, we
// can include preset read only registers in this class, as a hard-coded
// '0'-register. (We used to simulate this on ppc.)
// 32 bit registers that can be read and written i.e. these registers
// can be dest (or src) of normal instructions.
reg_class bits32_reg_rw(
/*R0*/ // R0
/*R1*/ // SP
R2, // TOC
R3,
R4,
R5,
R6,
R7,
R8,
R9,
R10,
R11,
R12,
/*R13*/ // system thread id
R14,
R15,
/*R16*/ // R16_thread
R17,
R18,
R19,
R20,
R21,
R22,
R23,
R24,
R25,
R26,
R27,
R28,
/*R29,*/ // global TOC
R30,
R31
);
// 32 bit registers that can only be read i.e. these registers can
// only be src of all instructions.
reg_class bits32_reg_ro(
/*R0*/ // R0
/*R1*/ // SP
R2, // TOC
R3,
R4,
R5,
R6,
R7,
R8,
R9,
R10,
R11,
R12,
/*R13*/ // system thread id
R14,
R15,
/*R16*/ // R16_thread
R17,
R18,
R19,
R20,
R21,
R22,
R23,
R24,
R25,
R26,
R27,
R28,
/*R29,*/
R30,
R31
);
reg_class rscratch1_bits32_reg(R11);
reg_class rscratch2_bits32_reg(R12);
reg_class rarg1_bits32_reg(R3);
reg_class rarg2_bits32_reg(R4);
reg_class rarg3_bits32_reg(R5);
reg_class rarg4_bits32_reg(R6);
// ----------------------------
// 64 Bit Register Classes
// ----------------------------
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class rscratch1_bits64_reg(R11_H, R11);
reg_class rscratch2_bits64_reg(R12_H, R12);
reg_class rarg1_bits64_reg(R3_H, R3);
reg_class rarg2_bits64_reg(R4_H, R4);
reg_class rarg3_bits64_reg(R5_H, R5);
reg_class rarg4_bits64_reg(R6_H, R6);
// Thread register, 'written' by tlsLoadP, see there.
reg_class thread_bits64_reg(R16_H, R16);
reg_class r19_bits64_reg(R19_H, R19);
// 64 bit registers that can be read and written i.e. these registers
// can be dest (or src) of normal instructions.
reg_class bits64_reg_rw(
/*R0_H, R0*/ // R0
/*R1_H, R1*/ // SP
R2_H, R2, // TOC
R3_H, R3,
R4_H, R4,
R5_H, R5,
R6_H, R6,
R7_H, R7,
R8_H, R8,
R9_H, R9,
R10_H, R10,
R11_H, R11,
R12_H, R12,
/*R13_H, R13*/ // system thread id
R14_H, R14,
R15_H, R15,
/*R16_H, R16*/ // R16_thread
R17_H, R17,
R18_H, R18,
R19_H, R19,
R20_H, R20,
R21_H, R21,
R22_H, R22,
R23_H, R23,
R24_H, R24,
R25_H, R25,
R26_H, R26,
R27_H, R27,
R28_H, R28,
/*R29_H, R29,*/
R30_H, R30,
R31_H, R31
);
// 64 bit registers used excluding r2, r11 and r12
// Used to hold the TOC to avoid collisions with expanded LeafCall which uses
// r2, r11 and r12 internally.
reg_class bits64_reg_leaf_call(
/*R0_H, R0*/ // R0
/*R1_H, R1*/ // SP
/*R2_H, R2*/ // TOC
R3_H, R3,
R4_H, R4,
R5_H, R5,
R6_H, R6,
R7_H, R7,
R8_H, R8,
R9_H, R9,
R10_H, R10,
/*R11_H, R11*/
/*R12_H, R12*/
/*R13_H, R13*/ // system thread id
R14_H, R14,
R15_H, R15,
/*R16_H, R16*/ // R16_thread
R17_H, R17,
R18_H, R18,
R19_H, R19,
R20_H, R20,
R21_H, R21,
R22_H, R22,
R23_H, R23,
R24_H, R24,
R25_H, R25,
R26_H, R26,
R27_H, R27,
R28_H, R28,
/*R29_H, R29,*/
R30_H, R30,
R31_H, R31
);
// Used to hold the TOC to avoid collisions with expanded DynamicCall
// which uses r19 as inline cache internally and expanded LeafCall which uses
// r2, r11 and r12 internally.
reg_class bits64_constant_table_base(
/*R0_H, R0*/ // R0
/*R1_H, R1*/ // SP
/*R2_H, R2*/ // TOC
R3_H, R3,
R4_H, R4,
R5_H, R5,
R6_H, R6,
R7_H, R7,
R8_H, R8,
R9_H, R9,
R10_H, R10,
/*R11_H, R11*/
/*R12_H, R12*/
/*R13_H, R13*/ // system thread id
R14_H, R14,
R15_H, R15,
/*R16_H, R16*/ // R16_thread
R17_H, R17,
R18_H, R18,
/*R19_H, R19*/
R20_H, R20,
R21_H, R21,
R22_H, R22,
R23_H, R23,
R24_H, R24,
R25_H, R25,
R26_H, R26,
R27_H, R27,
R28_H, R28,
/*R29_H, R29,*/
R30_H, R30,
R31_H, R31
);
// 64 bit registers that can only be read i.e. these registers can
// only be src of all instructions.
reg_class bits64_reg_ro(
/*R0_H, R0*/ // R0
R1_H, R1,
R2_H, R2, // TOC
R3_H, R3,
R4_H, R4,
R5_H, R5,
R6_H, R6,
R7_H, R7,
R8_H, R8,
R9_H, R9,
R10_H, R10,
R11_H, R11,
R12_H, R12,
/*R13_H, R13*/ // system thread id
R14_H, R14,
R15_H, R15,
R16_H, R16, // R16_thread
R17_H, R17,
R18_H, R18,
R19_H, R19,
R20_H, R20,
R21_H, R21,
R22_H, R22,
R23_H, R23,
R24_H, R24,
R25_H, R25,
R26_H, R26,
R27_H, R27,
R28_H, R28,
/*R29_H, R29,*/ // TODO: let allocator handle TOC!!
R30_H, R30,
R31_H, R31
);
// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(
/*CCR0*/ // scratch
/*CCR1*/ // scratch
/*CCR2*/ // nv!
/*CCR3*/ // nv!
/*CCR4*/ // nv!
CCR5,
CCR6,
CCR7
);
reg_class int_flags_ro(
CCR0,
CCR1,
CCR2,
CCR3,
CCR4,
CCR5,
CCR6,
CCR7
);
reg_class int_flags_CR0(CCR0);
reg_class int_flags_CR1(CCR1);
reg_class int_flags_CR6(CCR6);
reg_class ctr_reg(SR_CTR);
// ----------------------------
// Float Register Classes
// ----------------------------
reg_class flt_reg(
F0,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14, // nv!
F15, // nv!
F16, // nv!
F17, // nv!
F18, // nv!
F19, // nv!
F20, // nv!
F21, // nv!
F22, // nv!
F23, // nv!
F24, // nv!
F25, // nv!
F26, // nv!
F27, // nv!
F28, // nv!
F29, // nv!
F30, // nv!
F31 // nv!
);
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
reg_class dbl_reg(
F0, F0_H,
F1, F1_H,
F2, F2_H,
F3, F3_H,
F4, F4_H,
F5, F5_H,
F6, F6_H,
F7, F7_H,
F8, F8_H,
F9, F9_H,
F10, F10_H,
F11, F11_H,
F12, F12_H,
F13, F13_H,
F14, F14_H, // nv!
F15, F15_H, // nv!
F16, F16_H, // nv!
F17, F17_H, // nv!
F18, F18_H, // nv!
F19, F19_H, // nv!
F20, F20_H, // nv!
F21, F21_H, // nv!
F22, F22_H, // nv!
F23, F23_H, // nv!
F24, F24_H, // nv!
F25, F25_H, // nv!
F26, F26_H, // nv!
F27, F27_H, // nv!
F28, F28_H, // nv!
F29, F29_H, // nv!
F30, F30_H, // nv!
F31, F31_H // nv!
);
// ----------------------------
// Vector-Scalar Register Class
// ----------------------------
reg_class vs_reg(
// Attention: Only these ones are saved & restored at safepoint by RegisterSaver.
VSR32,
VSR33,
VSR34,
VSR35,
VSR36,
VSR37,
VSR38,
VSR39,
VSR40,
VSR41,
VSR42,
VSR43,
VSR44,
VSR45,
VSR46,
VSR47,
VSR48,
VSR49,
VSR50,
VSR51
// VSR52-VSR63 // nv!
);
%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
// int_def <name> ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
// #define <name> (<expression>)
// // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
// assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
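// Illustrative sketch of what the ADLC generates for the first entry below
// (file and macro names follow the format described above, not copied from
// generated sources):
//   ad_ppc.hpp:  #define DEFAULT_COST (100)   // value == 100
//   ad_ppc.cpp:  assert(DEFAULT_COST == 100, "Expect (100) to equal 100");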
definitions %{
// The default cost (of an ALU instruction).
int_def DEFAULT_COST_LOW ( 30, 30);
int_def DEFAULT_COST ( 100, 100);
int_def HUGE_COST (1000000, 1000000);
// Memory refs
int_def MEMORY_REF_COST_LOW ( 200, DEFAULT_COST * 2);
int_def MEMORY_REF_COST ( 300, DEFAULT_COST * 3);
// Branches are even more expensive.
int_def BRANCH_COST ( 900, DEFAULT_COST * 9);
int_def CALL_COST ( 1300, DEFAULT_COST * 13);
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description.
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.
#include "opto/convertnode.hpp"
// Returns true if Node n is followed by a MemBar node that
// will do an acquire. If so, this node must not do the acquire
// operation.
bool followed_by_acquire(const Node *n);
%}
source %{
#include "oops/klass.inline.hpp"
void PhaseOutput::pd_perform_mach_node_analysis() {
}
int MachNode::pd_alignment_required() const {
return 1;
}
int MachNode::compute_padding(int current_offset) const {
return 0;
}
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
return false;
}
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
return clone_base_plus_offset_address(m, mstack, address_visited);
}
// Optimize load-acquire.
//
// Check if acquire is unnecessary due to following operation that does
// acquire anyways.
// Walk the pattern:
//
// n: Load.acq
// |
// MemBarAcquire
// | |
// Proj(ctrl) Proj(mem)
// | |
// MemBarRelease/Volatile
//
bool followed_by_acquire(const Node *load) {
assert(load->is_Load(), "So far implemented only for loads.");
// Find MemBarAcquire.
const Node *mba = NULL;
for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
const Node *out = load->fast_out(i);
if (out->Opcode() == Op_MemBarAcquire) {
if (out->in(0) == load) continue; // Skip control edge, membar should be found via precedence edge.
mba = out;
break;
}
}
if (!mba) return false;
// Find following MemBar node.
//
// The following node must be reachable by control AND memory
// edge to assure no other operations are in between the two nodes.
//
// So first get the Proj node, mem_proj, to use it to iterate forward.
Node *mem_proj = NULL;
for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
mem_proj = mba->fast_out(i); // Runs out of bounds and asserts if Proj not found.
assert(mem_proj->is_Proj(), "only projections here");
ProjNode *proj = mem_proj->as_Proj();
if (proj->_con == TypeFunc::Memory &&
!Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
break;
}
assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");
// Search MemBar behind Proj. If there are other memory operations
// behind the Proj we lost.
for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
Node *x = mem_proj->fast_out(j);
// Proj might have an edge to a store or load node which precedes the membar.
if (x->is_Mem()) return false;
// On PPC64 release and volatile are implemented by an instruction
// that also has acquire semantics. I.e. there is no need for an
// acquire before these.
int xop = x->Opcode();
if (xop == Op_MemBarRelease || xop == Op_MemBarVolatile) {
// Make sure we're not missing Call/Phi/MergeMem by checking
// control edges. The control edge must directly lead back
// to the MemBarAcquire
Node *ctrl_proj = x->in(0);
if (ctrl_proj->is_Proj() && ctrl_proj->in(0) == mba) {
return true;
}
}
}
return false;
}
#define __ _masm.
// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true
// ****************************************************************************
// REQUIRED FUNCTIONALITY
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
// PPC port: Removed use of lazy constant construct.
int MachCallStaticJavaNode::ret_addr_offset() {
// It's only a single branch-and-link instruction.
return 4;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
// Offset is 4 with postalloc expanded calls (bl is one instruction). We use
// postalloc expanded calls if we use inline caches and do not update method data.
if (UseInlineCaches) return 4;
int vtable_index = this->_vtable_index;
if (vtable_index < 0) {
// Must be invalid_vtable_index, not nonvirtual_vtable_index.
assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
return 12;
} else {
return 24 + MacroAssembler::instr_size_for_decode_klass_not_null();
}
}
int MachCallRuntimeNode::ret_addr_offset() {
if (rule() == CallRuntimeDirect_rule) {
// CallRuntimeDirectNode uses call_c.
#if defined(ABI_ELFv2)
return 28;
#else
return 40;
#endif
}
assert(rule() == CallLeafDirect_rule, "unexpected node with rule %u", rule());
// CallLeafDirectNode uses bl.
return 4;
}
//=============================================================================
// condition code conversions
static int cc_to_boint(int cc) {
return Assembler::bcondCRbiIs0 | (cc & 8);
}
static int cc_to_inverse_boint(int cc) {
return Assembler::bcondCRbiIs0 | (8-(cc & 8));
}
static int cc_to_biint(int cc, int flags_reg) {
return (flags_reg << 2) | (cc & 3);
}
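// Worked example (derived from the helpers above, for illustration only):
// with flags_reg = 1 (CCR1) and cc = 2, cc_to_biint yields (1 << 2) | 2 = 6,
// i.e. bit 2 within the second 4-bit CR field.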
//=============================================================================
// Compute padding required for nodes which need alignment. The padding
// is the number of bytes (not instructions) which will be inserted before
// the instruction. The padding must match the size of a NOP instruction.
// Add nop if a prefixed (two-word) instruction is going to cross a 64-byte boundary.
// (See Section 1.6 of Power ISA Version 3.1)
static int compute_prefix_padding(int current_offset) {
assert(PowerArchitecturePPC64 >= 10 && (CodeEntryAlignment & 63) == 0,
"Code buffer must be aligned to a multiple of 64 bytes");
if (is_aligned(current_offset + BytesPerInstWord, 64)) {
return BytesPerInstWord;
}
return 0;
}
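// Worked example: with current_offset == 60, 60 + BytesPerInstWord == 64 is
// 64-byte aligned, so an 8-byte prefixed instruction starting at offset 60
// would cross the boundary; one nop (4 bytes) of padding is returned.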
int loadConI32Node::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
int loadConL34Node::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
int addI_reg_imm32Node::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
int addL_reg_imm34Node::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
int addP_reg_imm34Node::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
int cmprb_Whitespace_reg_reg_prefixedNode::compute_padding(int current_offset) const {
return compute_prefix_padding(current_offset);
}
//=============================================================================
// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
__ illtrap();
}
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("BREAKPOINT");
}
#endif
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_break(cbuf);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
//=============================================================================
void emit_nop(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
__ nop();
}
static inline void emit_long(CodeBuffer &cbuf, int value) {
*((int*)(cbuf.insts_end())) = value;
cbuf.set_insts_end(cbuf.insts_end() + BytesPerInstWord);
}
//=============================================================================
%} // interrupt source
source_hpp %{ // Header information of the source block.
//--------------------------------------------------------------
//---< Used for optimization in Compile::Shorten_branches >---
//--------------------------------------------------------------
class C2_MacroAssembler;
class CallStubImpl {
public:
// Emit call stub, compiled java to interpreter.
static void emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
// Size of call trampoline stub.
// This doesn't need to be accurate to the byte, but it
// must be larger than or equal to the real size of the stub.
static uint size_call_trampoline() {
return MacroAssembler::trampoline_stub_size;
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 5;
}
};
%} // end source_hpp
source %{
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
// branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
// load the call target from the constant pool
// branch via CTR (LR/link still points to the call-site above)
void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
if (stub == NULL) {
ciEnv::current()->record_out_of_memory_failure();
}
}
//=============================================================================
// Emit an inline branch-and-link call and a related trampoline stub.
//
// code sequences:
//
// call-site:
// branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
// load the call target from the constant pool
// branch via CTR (LR/link still points to the call-site above)
//
typedef struct {
int insts_call_instruction_offset;
int ret_addr_offset;
} EmitCallOffsets;
// Emit a branch-and-link instruction that branches to a trampoline.
// - Remember the offset of the branch-and-link instruction.
// - Add a relocation at the branch-and-link instruction.
// - Emit a branch-and-link.
// - Remember the return pc offset.
EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler &_masm, address entry_point, relocInfo::relocType rtype) {
EmitCallOffsets offsets = { -1, -1 };
const int start_offset = __ offset();
offsets.insts_call_instruction_offset = __ offset();
// No entry point given, use the current pc.
if (entry_point == NULL) entry_point = __ pc();
// Put the entry point as a constant into the constant pool.
const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
if (entry_point_toc_addr == NULL) {
ciEnv::current()->record_out_of_memory_failure();
return offsets;
}
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype);
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
__ bl((address) __ pc());
offsets.ret_addr_offset = __ offset() - start_offset;
return offsets;
}
//=============================================================================
// Factory for creating loadConL* nodes for large/small constant pool.
static inline jlong replicate_immF(float con) {
// Replicate float con 2 times and pack into vector.
int val = *((int*)&con);
jlong lval = val;
lval = (lval << 32) | (lval & 0xFFFFFFFFl);
return lval;
}
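// Worked example: con = 1.0f has bit pattern 0x3F800000, so replicate_immF
// returns 0x3F8000003F800000 (the float bits packed into both 32-bit halves).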
//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = BITS64_CONSTANT_TABLE_BASE_mask();
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
iRegPdstOper *op_dst = new iRegPdstOper();
MachNode *m1 = new loadToc_hiNode();
MachNode *m2 = new loadToc_loNode();
m1->add_req(NULL);
m2->add_req(NULL, m1);
m1->_opnds[0] = op_dst;
m2->_opnds[0] = op_dst;
m2->_opnds[1] = op_dst;
ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
nodes->push(m1);
nodes->push(m2);
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Is postalloc expanded.
ShouldNotReachHere();
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
return 0;
}
#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
//=============================================================================
#ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
Compile* C = ra_->C;
const long framesize = C->output()->frame_slots() << LogBytesPerInt;
st->print("PROLOG\n\t");
if (C->output()->need_stack_bang(framesize)) {
st->print("stack_overflow_check\n\t");
}
if (!false /* TODO: PPC port C->is_frameless_method()*/) {
st->print("save return pc\n\t");
st->print("push frame %ld\n\t", -framesize);
}
if (C->stub_function() == NULL) {
st->print("nmethod entry barrier\n\t");
}
}
#endif
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
const long framesize = C->output()->frame_size_in_bytes();
assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;
const Register return_pc = R20; // Must match return_addr() in frame section.
const Register callers_sp = R21;
const Register push_frame_temp = R22;
const Register toc_temp = R23;
assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp);
if (method_is_frameless) {
// Add nop at beginning of all frameless methods to prevent any
// oop instructions from getting overwritten by make_not_entrant
// (patching attempt would fail).
__ nop();
} else {
// Get return pc.
__ mflr(return_pc);
}
if (C->clinit_barrier_on_entry()) {
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
Label L_skip_barrier;
Register klass = toc_temp;
// Notify OOP recorder (don't need the relocation)
AddressLiteral md = __ constant_metadata_address(C->method()->holder()->constant_encoding());
__ load_const_optimized(klass, md.value(), R0);
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
__ mtctr(klass);
__ bctr();
__ bind(L_skip_barrier);
}
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be
// careful, because some VM calls (such as call site linkage) can
// use several kilobytes of stack. But the stack safety zone should
// account for that. See bugs 4446381, 4468289, 4497237.
int bangsize = C->output()->bang_size_in_bytes();
assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
if (C->output()->need_stack_bang(bangsize)) {
// Unfortunately we cannot use the function provided in
// assembler.cpp as we have to emulate the pipes. So I had to
// insert the code of generate_stack_overflow_check(), see
// assembler.cpp for some illuminative comments.
const int page_size = os::vm_page_size();
int bang_end = StackOverflow::stack_shadow_zone_size();
// This is how far the previous frame's stack banging extended.
const int bang_end_safe = bang_end;
if (bangsize > page_size) {
bang_end += bangsize;
}
int bang_offset = bang_end_safe;
while (bang_offset <= bang_end) {
// Need at least one stack bang at end of shadow zone.
// Again I had to copy code, this time from assembler_ppc.cpp,
// bang_stack_with_offset - see there for comments.
// Stack grows down, caller passes positive offset.
assert(bang_offset > 0, "must bang with positive offset");
long stdoffset = -bang_offset;
if (Assembler::is_simm(stdoffset, 16)) {
// Signed 16 bit offset, a simple std is ok.
if (UseLoadInstructionsForStackBangingPPC64) {
__ ld(R0, (int)(signed short)stdoffset, R1_SP);
} else {
__ std(R0, (int)(signed short)stdoffset, R1_SP);
}
} else if (Assembler::is_simm(stdoffset, 31)) {
// Use largeoffset calculations for addis & ld/std.
const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
Register tmp = R11;
__ addis(tmp, R1_SP, hi);
if (UseLoadInstructionsForStackBangingPPC64) {
__ ld(R0, lo, tmp);
} else {
__ std(R0, lo, tmp);
}
} else {
ShouldNotReachHere();
}
bang_offset += page_size;
}
// R11 trashed
} // C->output()->need_stack_bang(framesize)
unsigned int bytes = (unsigned int)framesize;
long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
ciMethod *currMethod = C->method();
if (!method_is_frameless) {
// Get callers sp.
__ mr(callers_sp, R1_SP);
// Push method's frame, modifies SP.
assert(Assembler::is_uimm(framesize, 32U), "wrong type");
// The ABI is already accounted for in 'framesize' via the
// 'out_preserve' area.
Register tmp = push_frame_temp;
// Had to insert code of push_frame((unsigned int)framesize, push_frame_temp).
if (Assembler::is_simm(-offset, 16)) {
__ stdu(R1_SP, -offset, R1_SP);
} else {
long x = -offset;
// Had to insert load_const(tmp, -offset).
__ lis( tmp, (int)((signed short)(((x >> 32) & 0xffff0000) >> 16)));
__ ori( tmp, tmp, ((x >> 32) & 0x0000ffff));
__ sldi(tmp, tmp, 32);
__ oris(tmp, tmp, (x & 0xffff0000) >> 16);
__ ori( tmp, tmp, (x & 0x0000ffff));
__ stdux(R1_SP, R1_SP, tmp);
}
}
#if 0 // TODO: PPC port
// For testing large constant pools, emit a lot of constants to constant pool.
// "Randomize" const_size.
if (ConstantsALot) {
const int num_consts = const_size();
for (int i = 0; i < num_consts; i++) {
__ long_constant(0xB0B5B00BBABE);
}
}
#endif
if (!method_is_frameless) {
// Save return pc.
__ std(return_pc, _abi0(lr), callers_sp);
}
if (C->stub_function() == NULL) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm, push_frame_temp);
}
C->output()->set_frame_complete(cbuf.insts_size());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
// Variable size. Determine dynamically.
return MachNode::size(ra_);
}
int MachPrologNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // 1 reloc entry for load_const(toc).
}
//=============================================================================
#ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
Compile* C = ra_->C;
st->print("EPILOG\n\t");
st->print("restore return pc\n\t");
st->print("pop frame\n\t");
if (do_polling() && C->is_method_compilation()) {
st->print("safepoint poll\n\t");
}
}
#endif
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
const long framesize = ((long)C->output()->frame_slots()) << LogBytesPerInt;
assert(framesize >= 0, "negative frame-size?");
const bool method_needs_polling = do_polling() && C->is_method_compilation();
const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;
const Register return_pc = R31; // Must survive C-call to enable_stack_reserved_zone().
const Register temp = R12;
if (!method_is_frameless) {
// Restore return pc relative to callers' sp.
__ ld(return_pc, ((int)framesize) + _abi0(lr), R1_SP);
// Move return pc to LR.
__ mtlr(return_pc);
// Pop frame (fixed frame-size).
__ addi(R1_SP, R1_SP, (int)framesize);
}
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check(return_pc);
}
if (method_needs_polling) {
Label dummy_label;
Label* code_stub = &dummy_label;
if (!UseSIGTRAP && !C->output()->in_scratch_emit_size()) {
code_stub = &C->output()->safepoint_poll_table()->add_safepoint(__ offset());
__ relocate(relocInfo::poll_return_type);
}
__ safepoint_poll(*code_stub, temp, true /* at_return */, true /* in_nmethod */);
}
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
// Variable size. Determine dynamically.
return MachNode::size(ra_);
}
int MachEpilogNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // 1 for load_from_polling_page.
}
const Pipeline * MachEpilogNode::pipeline() const {
return MachNode::pipeline_class();
}
// =============================================================================
// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };
static enum RC rc_class(OptoReg::Name reg) {
// Return the register class for the given register. The given register
// reg is a <register>_num value, which is an index into the MachRegisterNumbers
// enumeration in adGlobals_ppc.hpp.
if (reg == OptoReg::Bad) return rc_bad;
// We have 64 integer register halves, starting at index 0.
if (reg < 64) return rc_int;
// We have 64 floating-point register halves, starting at index 64.
if (reg < 64+64) return rc_float;
// We have 64 vector-scalar registers, starting at index 128.
if (reg < 64+64+64) return rc_vs;
// Between float regs & stack are the flags regs.
assert(OptoReg::is_stack(reg) || reg < 64+64+64, "blow up if spilling flags");
return rc_stack;
}
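// Worked example (indices per the chunk layout described above): an integer
// half in [0, 64) such as R3_num maps to rc_int, a float half in [64, 128)
// such as F3_num maps to rc_float, a VSR index in [128, 192) maps to rc_vs,
// and any stack slot maps to rc_stack.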
static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int reg, int offset,
bool do_print, Compile* C, outputStream *st) {
assert(opcode == Assembler::LD_OPCODE ||
opcode == Assembler::STD_OPCODE ||
opcode == Assembler::LWZ_OPCODE ||
opcode == Assembler::STW_OPCODE ||
opcode == Assembler::LFD_OPCODE ||
opcode == Assembler::STFD_OPCODE ||
opcode == Assembler::LFS_OPCODE ||
opcode == Assembler::STFS_OPCODE,
"opcode not supported");
if (cbuf) {
int d =
(Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
: Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
emit_long(*cbuf, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
}
#ifndef PRODUCT
else if (do_print) {
st->print("%-7s %s, [R1_SP + #%d+%d] \t// spill copy",
op_str,
Matcher::regName[reg],
offset, 0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/);
}
#endif
return 4; // size
}
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
OptoReg::Name src_hi = ra_->get_reg_second(in(1));
OptoReg::Name src_lo = ra_->get_reg_first(in(1));
OptoReg::Name dst_hi = ra_->get_reg_second(this);
OptoReg::Name dst_lo = ra_->get_reg_first(this);
enum RC src_hi_rc = rc_class(src_hi);
enum RC src_lo_rc = rc_class(src_lo);
enum RC dst_hi_rc = rc_class(dst_hi);
enum RC dst_lo_rc = rc_class(dst_lo);
assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
if (src_hi != OptoReg::Bad)
assert((src_lo&1)==0 && src_lo+1==src_hi &&
(dst_lo&1)==0 && dst_lo+1==dst_hi,
"expected aligned-adjacent pairs");
// Generate spill code!
int size = 0;
if (src_lo == dst_lo && src_hi == dst_hi)
return size; // Self copy, no move.
if (bottom_type()->isa_vect() != NULL && ideal_reg() == Op_VecX) {
// Memory->Memory Spill.
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
__ ld(R0, src_offset, R1_SP);
__ std(R0, dst_offset, R1_SP);
__ ld(R0, src_offset+8, R1_SP);
__ std(R0, dst_offset+8, R1_SP);
}
size += 16;
}
// VectorSRegister->Memory Spill.
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
int dst_offset = ra_->reg2offset(dst_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
__ addi(R0, R1_SP, dst_offset);
__ stxvd2x(Rsrc, R0);
}
size += 8;
}
// Memory->VectorSRegister Spill.
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
int src_offset = ra_->reg2offset(src_lo);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
__ addi(R0, R1_SP, src_offset);
__ lxvd2x(Rdst, R0);
}
size += 8;
}
// VectorSRegister->VectorSRegister.
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
__ xxlor(Rdst, Rsrc, Rsrc);
}
size += 4;
}
else {
ShouldNotReachHere(); // No VSR spill.
}
return size;
}
// --------------------------------------
// Memory->Memory Spill. Use R0 to hold the value.
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!cbuf && !do_size) st->print("\n\t");
size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
if (!cbuf && !do_size) st->print("\n\t");
size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
}
return size;
}
// --------------------------------------
// Check for float->int copy; requires a trip through memory.
if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
Unimplemented();
}
// --------------------------------------
// Check for integer reg-reg copy.
if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
size = (Rsrc != Rdst) ? 4 : 0;
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
if (size) {
__ mr(Rdst, Rsrc);
}
}
#ifndef PRODUCT
else if (!do_size) {
if (size) {
st->print("%-7s %s, %s \t// spill copy", "MR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
} else {
st->print("%-7s %s, %s \t// spill copy", "MR-NOP", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
}
}
#endif
return size;
}
// Check for integer store.
if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
int dst_offset = ra_->reg2offset(dst_lo);
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
// Check for integer load.
if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
// Check for float reg-reg copy.
if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
__ fmr(Rdst, Rsrc);
}
#ifndef PRODUCT
else if (!do_size) {
st->print("%-7s %s, %s \t// spill copy", "FMR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
}
#endif
return 4;
}
// Check for float store.
if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
int dst_offset = ra_->reg2offset(dst_lo);
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
// Check for float load.
if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
"expected same type of move for high parts");
size += ld_st_helper(cbuf, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
size += ld_st_helper(cbuf, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
// --------------------------------------------------------------------
// Check for hi bits still needing moving. Only happens for misaligned
// arguments to native calls.
if (src_hi == dst_hi)
return size; // Self copy; no move.
assert(src_hi_rc != rc_bad && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
ShouldNotReachHere(); // Unimplemented
return 0;
}
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
if (!ra_)
st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
else
implementation(NULL, ra_, false, st);
}
#endif
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation(&cbuf, ra_, false, NULL);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return implementation(NULL, ra_, true, NULL);
}
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("NOP \t// %d nops to pad for loops or prefixed instructions.", _count);
}
#endif
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
// _count contains the number of nops needed for padding.
for (int i = 0; i < _count; i++) {
__ nop();
}
}
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
return _count * 4;
}
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
char reg_str[128];
ra_->dump_register(this, reg_str);
st->print("ADDI %s, SP, %d \t// box node", reg_str, offset);
}
#endif
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
if (Assembler::is_simm(offset, 16)) {
__ addi(as_Register(reg), R1, offset);
} else {
ShouldNotReachHere();
}
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
// BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
return 4;
}
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print_cr("---- MachUEPNode ----");
st->print_cr("...");
}
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
// Inline_cache contains a klass.
Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
Register receiver_klass = R12_scratch2; // tmp
assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);