|
|
Quellcode-Bibliothek
© Kompilation durch diese Firma
[Weder Korrektheit noch Funktionsfähigkeit der Software werden zugesichert.]
Datei:
Sprache: Unknown
Spracherkennung für: .ad — tatsächlicher Inhalt: HotSpot ADL (Architecture Description File, s390) mit eingebettetem C++; die automatische Vermutung "SML" ist falsch. //
// Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2017, 2022 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
// z/Architecture Architecture Description File
// Major contributions by AS, JL, LS.
//
// Following information is derived from private mail communication
// (Oct. 2011).
//
// General branch target alignment considerations
//
// z/Architecture does not imply a general branch target alignment requirement.
// There are side effects and side considerations, though, which may
// provide some performance benefit. These are:
// - Align branch target on octoword (32-byte) boundary
// On more recent models (from z9 on), I-fetch is done on a Octoword
// (32 bytes at a time) basis. To avoid I-fetching unnecessary
// instructions, branch targets should be 32-byte aligned. If this
// exact alignment cannot be achieved, having the branch target in
// the first doubleword still provides some benefit.
// - Avoid branch targets at the end of cache lines (> 64 bytes distance).
// Sequential instruction prefetching after the branch target starts
// immediately after having fetched the octoword containing the
// branch target. When I-fetching crosses a cache line, there may be
// a small stall. The worst case: the branch target (at the end of
// a cache line) is a L1 I-cache miss and the next line as well.
// Then, the entire target line must be filled first (to continue at the
// branch target). Only then can the next sequential line be filled.
// - Avoid multiple poorly predicted branches in a row.
//
//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name (register save type, C convention save type,
// ideal register type, encoding);
//
// Register Save Types:
//
// NS = No-Save: The register allocator assumes that these registers
// can be used without saving upon entry to the method, &
// that they do not need to be saved at call sites.
//
// SOC = Save-On-Call: The register allocator assumes that these registers
// can be used without saving upon entry to the method,
// but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, but they do not need to be saved at call sites.
//
// AS = Always-Save: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
// z/Architecture register definitions, based on the z/Architecture Principles
// of Operation, 5th Edition, September 2005, and z/Linux Elf ABI Supplement,
// 5th Edition, March 2001.
//
// For each 64-bit register we must define two registers: the register
// itself, e.g. Z_R3, and a corresponding virtual other (32-bit-)'half',
// e.g. Z_R3_H, which is needed by the allocator, but is not used
// for stores, loads, etc.
// Integer/Long Registers
// ----------------------------
// z/Architecture has 16 64-bit integer registers.
// types: v = volatile, nv = non-volatile, s = system
// Note: the *_H halves carry the dummy encoding 99. Per the comment above
// they exist only to satisfy the allocator and are never encoded into
// instructions themselves.
reg_def Z_R0 (SOC, SOC, Op_RegI, 0, Z_R0->as_VMReg()); // v scratch1
reg_def Z_R0_H (SOC, SOC, Op_RegI, 99, Z_R0->as_VMReg()->next());
reg_def Z_R1 (SOC, SOC, Op_RegI, 1, Z_R1->as_VMReg()); // v scratch2
reg_def Z_R1_H (SOC, SOC, Op_RegI, 99, Z_R1->as_VMReg()->next());
reg_def Z_R2 (SOC, SOC, Op_RegI, 2, Z_R2->as_VMReg()); // v iarg1 & iret
reg_def Z_R2_H (SOC, SOC, Op_RegI, 99, Z_R2->as_VMReg()->next());
reg_def Z_R3 (SOC, SOC, Op_RegI, 3, Z_R3->as_VMReg()); // v iarg2
reg_def Z_R3_H (SOC, SOC, Op_RegI, 99, Z_R3->as_VMReg()->next());
reg_def Z_R4 (SOC, SOC, Op_RegI, 4, Z_R4->as_VMReg()); // v iarg3
reg_def Z_R4_H (SOC, SOC, Op_RegI, 99, Z_R4->as_VMReg()->next());
reg_def Z_R5 (SOC, SOC, Op_RegI, 5, Z_R5->as_VMReg()); // v iarg4
reg_def Z_R5_H (SOC, SOC, Op_RegI, 99, Z_R5->as_VMReg()->next());
reg_def Z_R6 (SOC, SOE, Op_RegI, 6, Z_R6->as_VMReg()); // v iarg5
reg_def Z_R6_H (SOC, SOE, Op_RegI, 99, Z_R6->as_VMReg()->next());
reg_def Z_R7 (SOC, SOE, Op_RegI, 7, Z_R7->as_VMReg());
reg_def Z_R7_H (SOC, SOE, Op_RegI, 99, Z_R7->as_VMReg()->next());
// Z_R8 is excluded from most register classes below and is marked
// "Z_thread" there (dedicated thread register).
reg_def Z_R8 (SOC, SOE, Op_RegI, 8, Z_R8->as_VMReg());
reg_def Z_R8_H (SOC, SOE, Op_RegI, 99, Z_R8->as_VMReg()->next());
reg_def Z_R9 (SOC, SOE, Op_RegI, 9, Z_R9->as_VMReg());
reg_def Z_R9_H (SOC, SOE, Op_RegI, 99, Z_R9->as_VMReg()->next());
reg_def Z_R10 (SOC, SOE, Op_RegI, 10, Z_R10->as_VMReg());
reg_def Z_R10_H(SOC, SOE, Op_RegI, 99, Z_R10->as_VMReg()->next());
reg_def Z_R11 (SOC, SOE, Op_RegI, 11, Z_R11->as_VMReg());
reg_def Z_R11_H(SOC, SOE, Op_RegI, 99, Z_R11->as_VMReg()->next());
reg_def Z_R12 (SOC, SOE, Op_RegI, 12, Z_R12->as_VMReg());
reg_def Z_R12_H(SOC, SOE, Op_RegI, 99, Z_R12->as_VMReg()->next());
reg_def Z_R13 (SOC, SOE, Op_RegI, 13, Z_R13->as_VMReg());
reg_def Z_R13_H(SOC, SOE, Op_RegI, 99, Z_R13->as_VMReg()->next());
reg_def Z_R14 (NS, NS, Op_RegI, 14, Z_R14->as_VMReg()); // s return_pc
reg_def Z_R14_H(NS, NS, Op_RegI, 99, Z_R14->as_VMReg()->next());
reg_def Z_R15 (NS, NS, Op_RegI, 15, Z_R15->as_VMReg()); // s SP
reg_def Z_R15_H(NS, NS, Op_RegI, 99, Z_R15->as_VMReg()->next());
// Float/Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.
// z/Architecture has 16 64-bit floating-point registers. Each can store a single
// or double precision floating-point value.
// types: v = volatile, nv = non-volatile, s = system
// As with the integer registers above, the *_H halves (encoding 99) exist
// only for the allocator.
reg_def Z_F0 (SOC, SOC, Op_RegF, 0, Z_F0->as_VMReg()); // v farg1 & fret
reg_def Z_F0_H (SOC, SOC, Op_RegF, 99, Z_F0->as_VMReg()->next());
reg_def Z_F1 (SOC, SOC, Op_RegF, 1, Z_F1->as_VMReg());
reg_def Z_F1_H (SOC, SOC, Op_RegF, 99, Z_F1->as_VMReg()->next());
reg_def Z_F2 (SOC, SOC, Op_RegF, 2, Z_F2->as_VMReg()); // v farg2
reg_def Z_F2_H (SOC, SOC, Op_RegF, 99, Z_F2->as_VMReg()->next());
reg_def Z_F3 (SOC, SOC, Op_RegF, 3, Z_F3->as_VMReg());
reg_def Z_F3_H (SOC, SOC, Op_RegF, 99, Z_F3->as_VMReg()->next());
reg_def Z_F4 (SOC, SOC, Op_RegF, 4, Z_F4->as_VMReg()); // v farg3
reg_def Z_F4_H (SOC, SOC, Op_RegF, 99, Z_F4->as_VMReg()->next());
reg_def Z_F5 (SOC, SOC, Op_RegF, 5, Z_F5->as_VMReg());
reg_def Z_F5_H (SOC, SOC, Op_RegF, 99, Z_F5->as_VMReg()->next());
reg_def Z_F6 (SOC, SOC, Op_RegF, 6, Z_F6->as_VMReg());
reg_def Z_F6_H (SOC, SOC, Op_RegF, 99, Z_F6->as_VMReg()->next());
reg_def Z_F7 (SOC, SOC, Op_RegF, 7, Z_F7->as_VMReg());
reg_def Z_F7_H (SOC, SOC, Op_RegF, 99, Z_F7->as_VMReg()->next());
reg_def Z_F8 (SOC, SOE, Op_RegF, 8, Z_F8->as_VMReg());
reg_def Z_F8_H (SOC, SOE, Op_RegF, 99, Z_F8->as_VMReg()->next());
reg_def Z_F9 (SOC, SOE, Op_RegF, 9, Z_F9->as_VMReg());
reg_def Z_F9_H (SOC, SOE, Op_RegF, 99, Z_F9->as_VMReg()->next());
reg_def Z_F10 (SOC, SOE, Op_RegF, 10, Z_F10->as_VMReg());
reg_def Z_F10_H(SOC, SOE, Op_RegF, 99, Z_F10->as_VMReg()->next());
reg_def Z_F11 (SOC, SOE, Op_RegF, 11, Z_F11->as_VMReg());
reg_def Z_F11_H(SOC, SOE, Op_RegF, 99, Z_F11->as_VMReg()->next());
reg_def Z_F12 (SOC, SOE, Op_RegF, 12, Z_F12->as_VMReg());
reg_def Z_F12_H(SOC, SOE, Op_RegF, 99, Z_F12->as_VMReg()->next());
reg_def Z_F13 (SOC, SOE, Op_RegF, 13, Z_F13->as_VMReg());
reg_def Z_F13_H(SOC, SOE, Op_RegF, 99, Z_F13->as_VMReg()->next());
reg_def Z_F14 (SOC, SOE, Op_RegF, 14, Z_F14->as_VMReg());
reg_def Z_F14_H(SOC, SOE, Op_RegF, 99, Z_F14->as_VMReg()->next());
reg_def Z_F15 (SOC, SOE, Op_RegF, 15, Z_F15->as_VMReg());
reg_def Z_F15_H(SOC, SOE, Op_RegF, 99, Z_F15->as_VMReg()->next());
// Special Registers
// Condition Codes Flag Registers
// z/Architecture has the PSW (program status word) that contains
// (among other information) the condition code. We treat this
// part of the PSW as a condition register CR. It consists of 4
// bits. Floating point instructions influence the same condition register CR.
reg_def Z_CR(SOC, SOC, Op_RegFlags, 0, Z_CR->as_VMReg()); // volatile
// Specify priority of register selection within phases of register
// allocation. Highest priority is first. A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, and choose no-save registers before save-on-call, and
// save-on-call before save-on-entry. Registers which participate in
// fix calling sequences should come last. Registers which are used
// as pairs must fall on an even boundary.
// It's worth about 1% on SPEC geomean to get this right.
// Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration
// in adGlobals_s390.hpp which defines the <register>_num values, e.g.
// Z_R3_num. Therefore, Z_R3_num may not be (and in reality is not)
// the same as Z_R3->encoding()! Furthermore, we cannot make any
// assumptions on ordering, e.g. Z_R3_num may be less than Z_R2_num.
// Additionally, the function
// static enum RC rc_class(OptoReg::Name reg)
// maps a given <register>_num value to its chunk type (except for flags)
// and its current implementation relies on chunk0 and chunk1 having a
// size of 64 each.
// NOTE(review): rc_class() below actually partitions at indices 32 and 64
// (32 integer halves, then 32 float halves), matching the 32 entries listed
// in each chunk here; the "size of 64 each" wording looks stale — confirm.
alloc_class chunk0(
// chunk0 contains *all* 32 integer register halves (16 registers x 2).
// potential SOE regs
Z_R13,Z_R13_H,
Z_R12,Z_R12_H,
Z_R11,Z_R11_H,
Z_R10,Z_R10_H,
Z_R9,Z_R9_H,
Z_R8,Z_R8_H,
Z_R7,Z_R7_H,
Z_R1,Z_R1_H,
Z_R0,Z_R0_H,
// argument registers
Z_R6,Z_R6_H,
Z_R5,Z_R5_H,
Z_R4,Z_R4_H,
Z_R3,Z_R3_H,
Z_R2,Z_R2_H,
// special registers
Z_R14,Z_R14_H,
Z_R15,Z_R15_H
);
alloc_class chunk1(
// Chunk1 contains *all* 32 floating-point register halves (16 registers x 2).
Z_F15,Z_F15_H,
Z_F14,Z_F14_H,
Z_F13,Z_F13_H,
Z_F12,Z_F12_H,
Z_F11,Z_F11_H,
Z_F10,Z_F10_H,
Z_F9,Z_F9_H,
Z_F8,Z_F8_H,
// scratch register
Z_F7,Z_F7_H,
Z_F5,Z_F5_H,
Z_F3,Z_F3_H,
Z_F1,Z_F1_H,
// argument registers
Z_F6,Z_F6_H,
Z_F4,Z_F4_H,
Z_F2,Z_F2_H,
Z_F0,Z_F0_H
);
// Chunk2 holds only the condition-code pseudo register.
alloc_class chunk2(
Z_CR
);
//-------Architecture Description Register Classes-----------------------
// Several register classes are automatically defined based upon
// information in this architecture description.
// 1) reg_class inline_cache_reg (as defined in frame section)
// 2) reg_class stack_slots(/* one chunk of stack-based "registers" */)
// Integer Register Classes
// General-purpose allocatable integer registers. R0/R1 (scratch),
// R8 (Z_thread), R14 (return_pc), and R15 (SP) are excluded.
reg_class z_int_reg(
/*Z_R0*/ // R0
/*Z_R1*/
Z_R2,
Z_R3,
Z_R4,
Z_R5,
Z_R6,
Z_R7,
/*Z_R8,*/ // Z_thread
Z_R9,
Z_R10,
Z_R11,
Z_R12,
Z_R13
/*Z_R14*/ // return_pc
/*Z_R15*/ // SP
);
// Like z_int_reg, but additionally excludes Z_R5, the odd half of a
// fixed even/odd register pair.
reg_class z_no_odd_int_reg(
/*Z_R0*/ // R0
/*Z_R1*/
Z_R2,
Z_R3,
Z_R4,
/*Z_R5,*/ // odd part of fix register pair
Z_R6,
Z_R7,
/*Z_R8,*/ // Z_thread
Z_R9,
Z_R10,
Z_R11,
Z_R12,
Z_R13
/*Z_R14*/ // return_pc
/*Z_R15*/ // SP
);
// Integer registers that are not argument registers (R2-R6 excluded).
reg_class z_no_arg_int_reg(
/*Z_R0*/ // R0
/*Z_R1*/ // scratch
/*Z_R2*/
/*Z_R3*/
/*Z_R4*/
/*Z_R5*/
/*Z_R6*/
Z_R7,
/*Z_R8*/ // Z_thread
Z_R9,
Z_R10,
Z_R11,
Z_R12,
Z_R13
/*Z_R14*/ // return_pc
/*Z_R15*/ // SP
);
// Single-register classes pinning each of the five integer argument registers.
reg_class z_rarg1_int_reg(Z_R2);
reg_class z_rarg2_int_reg(Z_R3);
reg_class z_rarg3_int_reg(Z_R4);
reg_class z_rarg4_int_reg(Z_R5);
reg_class z_rarg5_int_reg(Z_R6);
// Pointer Register Classes
// 64-bit build means 64-bit pointers means hi/lo pairs.
// Single half: presumably for narrow (compressed) oops — confirm.
reg_class z_rarg5_ptrN_reg(Z_R6);
reg_class z_rarg1_ptr_reg(Z_R2_H,Z_R2);
reg_class z_rarg2_ptr_reg(Z_R3_H,Z_R3);
reg_class z_rarg3_ptr_reg(Z_R4_H,Z_R4);
reg_class z_rarg4_ptr_reg(Z_R5_H,Z_R5);
reg_class z_rarg5_ptr_reg(Z_R6_H,Z_R6);
reg_class z_thread_ptr_reg(Z_R8_H,Z_R8);
// General-purpose pointer registers (hi/lo pairs); excludes scratch,
// Z_thread, return_pc and SP.
reg_class z_ptr_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/
Z_R2_H,Z_R2,
Z_R3_H,Z_R3,
Z_R4_H,Z_R4,
Z_R5_H,Z_R5,
Z_R6_H,Z_R6,
Z_R7_H,Z_R7,
/*Z_R8_H,Z_R8,*/ // Z_thread
Z_R9_H,Z_R9,
Z_R10_H,Z_R10,
Z_R11_H,Z_R11,
Z_R12_H,Z_R12,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// Like z_ptr_reg, but additionally excludes Z_R5/Z_R6 (no reason given
// here — presumably reserved while locking; confirm against lock emitters).
reg_class z_lock_ptr_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/
Z_R2_H,Z_R2,
Z_R3_H,Z_R3,
Z_R4_H,Z_R4,
/*Z_R5_H,Z_R5,*/
/*Z_R6_H,Z_R6,*/
Z_R7_H,Z_R7,
/*Z_R8_H,Z_R8,*/ // Z_thread
Z_R9_H,Z_R9,
Z_R10_H,Z_R10,
Z_R11_H,Z_R11,
Z_R12_H,Z_R12,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// Pointer registers that are not argument registers (R2-R6 excluded).
reg_class z_no_arg_ptr_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/ // scratch
/*Z_R2_H,Z_R2*/
/*Z_R3_H,Z_R3*/
/*Z_R4_H,Z_R4*/
/*Z_R5_H,Z_R5*/
/*Z_R6_H,Z_R6*/
Z_R7_H, Z_R7,
/*Z_R8_H,Z_R8*/ // Z_thread
Z_R9_H,Z_R9,
Z_R10_H,Z_R10,
Z_R11_H,Z_R11,
Z_R12_H,Z_R12,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// Special class for storeP instructions, which can store SP or RPC to
// TLS. (Note: Do not generalize this to "any_reg". If you add
// another register, such as FP, to this mask, the allocator may try
// to put a temp in it.)
// Register class for memory access base registers,
// This class is a superset of z_ptr_reg including Z_thread.
reg_class z_memory_ptr_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/
Z_R2_H,Z_R2,
Z_R3_H,Z_R3,
Z_R4_H,Z_R4,
Z_R5_H,Z_R5,
Z_R6_H,Z_R6,
Z_R7_H,Z_R7,
Z_R8_H,Z_R8, // Z_thread
Z_R9_H,Z_R9,
Z_R10_H,Z_R10,
Z_R11_H,Z_R11,
Z_R12_H,Z_R12,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// Other special pointer regs.
reg_class z_r1_regP(Z_R1_H,Z_R1);
reg_class z_r9_regP(Z_R9_H,Z_R9);
// Long Register Classes
// One class per long argument register (hi/lo pairs).
reg_class z_rarg1_long_reg(Z_R2_H,Z_R2);
reg_class z_rarg2_long_reg(Z_R3_H,Z_R3);
reg_class z_rarg3_long_reg(Z_R4_H,Z_R4);
reg_class z_rarg4_long_reg(Z_R5_H,Z_R5);
reg_class z_rarg5_long_reg(Z_R6_H,Z_R6);
// Longs in 1 register. Aligned adjacent hi/lo pairs.
reg_class z_long_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/
Z_R2_H,Z_R2,
Z_R3_H,Z_R3,
Z_R4_H,Z_R4,
Z_R5_H,Z_R5,
Z_R6_H,Z_R6,
Z_R7_H,Z_R7,
/*Z_R8_H,Z_R8,*/ // Z_thread
Z_R9_H,Z_R9,
Z_R10_H,Z_R10,
Z_R11_H,Z_R11,
Z_R12_H,Z_R12,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14,*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// z_long_reg without even registers
reg_class z_long_odd_reg(
/*Z_R0_H,Z_R0*/ // R0
/*Z_R1_H,Z_R1*/
Z_R3_H,Z_R3,
Z_R5_H,Z_R5,
Z_R7_H,Z_R7,
Z_R9_H,Z_R9,
Z_R11_H,Z_R11,
Z_R13_H,Z_R13
/*Z_R14_H,Z_R14,*/ // return_pc
/*Z_R15_H,Z_R15*/ // SP
);
// Special Class for Condition Code Flags Register
reg_class z_condition_reg(
Z_CR
);
// Scratch register for late profiling. Callee saved.
// NOTE(review): Z_R2 is declared SOC (volatile, iarg1/iret) in the reg_def
// section above, so "Callee saved" looks inaccurate — confirm.
reg_class z_rscratch2_bits64_reg(Z_R2_H, Z_R2);
// Float Register Classes
// Single-precision float registers; Z_F1 is reserved as scratch.
reg_class z_flt_reg(
Z_F0,
/*Z_F1,*/ // scratch
Z_F2,
Z_F3,
Z_F4,
Z_F5,
Z_F6,
Z_F7,
Z_F8,
Z_F9,
Z_F10,
Z_F11,
Z_F12,
Z_F13,
Z_F14,
Z_F15
);
reg_class z_rscratch1_flt_reg(Z_F1);
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
reg_class z_dbl_reg(
Z_F0,Z_F0_H,
/*Z_F1,Z_F1_H,*/ // scratch
Z_F2,Z_F2_H,
Z_F3,Z_F3_H,
Z_F4,Z_F4_H,
Z_F5,Z_F5_H,
Z_F6,Z_F6_H,
Z_F7,Z_F7_H,
Z_F8,Z_F8_H,
Z_F9,Z_F9_H,
Z_F10,Z_F10_H,
Z_F11,Z_F11_H,
Z_F12,Z_F12_H,
Z_F13,Z_F13_H,
Z_F14,Z_F14_H,
Z_F15,Z_F15_H
);
reg_class z_rscratch1_dbl_reg(Z_F1,Z_F1_H);
%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define 'name --> value' mappings to inform the ADLC of an integer valued name.
// Current support includes integer values in the range [0, 0x7FFFFFFF].
// Format:
// int_def <name> (<int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
// #define <name> (<expression>)
// // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
// assert(<name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
// The default cost (of an ALU instruction).
int_def DEFAULT_COST ( 100, 100);
int_def DEFAULT_COST_LOW ( 80, 80);
int_def DEFAULT_COST_HIGH ( 120, 120);
// Effectively infinite cost: used to make an instruction form unattractive
// to the matcher without forbidding it.
int_def HUGE_COST (1000000, 1000000);
// Put an advantage on REG_MEM vs. MEM+REG_REG operations.
int_def ALU_REG_COST ( 100, DEFAULT_COST);
int_def ALU_MEMORY_COST ( 150, 150);
// Memory refs are twice as expensive as run-of-the-mill.
int_def MEMORY_REF_COST_HI ( 220, 2 * DEFAULT_COST+20);
int_def MEMORY_REF_COST ( 200, 2 * DEFAULT_COST);
int_def MEMORY_REF_COST_LO ( 180, 2 * DEFAULT_COST-20);
// Branches are even more expensive.
int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
int_def CALL_COST ( 300, DEFAULT_COST * 3);
%}
source %{
// In PRODUCT builds, BLOCK_COMMENT expands to nothing; otherwise it emits
// an assembler block comment. BIND binds a label (and names it in debug builds).
#ifdef PRODUCT
#define BLOCK_COMMENT(str)
#define BIND(label) __ bind(label)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")
#endif
// Conventional shorthand: route assembler calls through the local _masm.
#define __ _masm.
// Size in bytes of a displacement-based access: 4 if the displacement of
// operand 1 fits an unsigned 12-bit immediate (short form), else 6 (long form).
#define Z_DISP_SIZE Immediate::is_uimm12((long)opnd_array(1)->disp(ra_,this,2)) ? 4 : 6
// Long-displacement form is always 6 bytes.
#define Z_DISP3_SIZE 6
// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true
// Given a register encoding, produce an Integer Register object.
static Register reg_to_register_object(int register_encoding);
// ****************************************************************************
// REQUIRED FUNCTIONALITY
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
// No platform-specific node analysis is needed on s390.
void PhaseOutput::pd_perform_mach_node_analysis() {
}
// Generic MachNodes impose no alignment requirement (1 byte).
int MachNode::pd_alignment_required() const {
return 1;
}
// Generic MachNodes need no padding; call nodes override this below.
int MachNode::compute_padding(int current_offset) const {
return 0;
}
// Byte offset from the start of this call node's code to the point the
// return address will point at.
int MachCallStaticJavaNode::ret_addr_offset() {
if (_method) {
// Java static call: fixed-size call sequence (nop + BRASL, see
// emit_call_reloc below).
return 8;
} else {
// Runtime call through the far-patchable call sequence.
return MacroAssembler::call_far_patchable_ret_addr_offset();
}
}
// Byte offset from the start of this dynamic call node's code to the
// return address.
int MachCallDynamicJavaNode::ret_addr_offset() {
// Consider size of receiver type profiling (C2 tiers).
int profile_receiver_type_size = 0;
int vtable_index = this->_vtable_index;
// -4 is presumably Method::invalid_vtable_index, marking an inline-cache
// call rather than a vtable-stub call — TODO confirm the sentinel.
if (vtable_index == -4) {
return 14 + profile_receiver_type_size;
} else {
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
// Vtable call sequence is longer than the IC call sequence.
return 36 + profile_receiver_type_size;
}
}
// Byte offset of the return address for a runtime call: 12 bytes of
// setup emitted before the far-patchable call sequence.
int MachCallRuntimeNode::ret_addr_offset() {
return 12 + MacroAssembler::call_far_patchable_ret_addr_offset();
}
// Compute padding required for nodes which need alignment
//
// The addresses of the call instructions need to be 4-byte aligned to
// ensure that they don't span a cache line so that they are atomically patchable.
// The actual calls get emitted at different offsets within the node emitters.
// ins_alignment needs to be set to 2 which means that up to 1 nop may get inserted.
//
// Each "(K - current_offset) & 2" below yields 0 or 2, i.e. at most one
// 2-byte nop, where K is the emitter-internal byte offset of the call
// instruction within the node; the padding makes that call 4-byte aligned.
int CallStaticJavaDirect_dynTOCNode::compute_padding(int current_offset) const {
return (0 - current_offset) & 2;
}
int CallDynamicJavaDirect_dynTOCNode::compute_padding(int current_offset) const {
return (6 - current_offset) & 2;
}
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
return (12 - current_offset) & 2;
}
int CallLeafDirectNode::compute_padding(int current_offset) const {
return (12 - current_offset) & 2;
}
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
return (12 - current_offset) & 2;
}
// Emit a single 2-byte nop into the code buffer.
void emit_nop(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
__ z_nop();
}
// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
__ z_illtrap();
}
#if !defined(PRODUCT)
// Debug formatting for a breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *, outputStream *os) const {
os->print("TA");
}
#endif
// A breakpoint is emitted as an illegal-op trap (see emit_break above).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_break(cbuf);
}
// Size is determined generically by emitting into a scratch buffer.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
// Emit a raw 2-byte instruction word.
static inline void z_emit16(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
__ emit_instruction((unsigned long)value, 2);
}
// Emit a raw 4-byte instruction word.
static inline void z_emit32(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
__ emit_instruction((unsigned long)value, 4);
}
// Emit a raw 6-byte instruction word.
static inline void z_emit48(CodeBuffer &cbuf, long value) {
C2_MacroAssembler _masm(&cbuf);
__ emit_instruction((unsigned long)value, 6);
}
// Emit an instruction whose length is derived from its opcode (length
// argument 0); returns the emitted length in bytes.
static inline unsigned int z_emit_inst(CodeBuffer &cbuf, long value) {
if (value < 0) {
// There obviously has been an unintended sign extension (int->long). Revert it.
value = (long)((unsigned long)((unsigned int)value));
}
C2_MacroAssembler _masm(&cbuf);
int len = __ emit_instruction((unsigned long)value, 0);
return len;
}
// Check effective address (at runtime) for required alignment.
// Computes disp(index,base) into Z_R0 (LAY), masks the low bits with
// NILL, and traps (illtrap) when any alignment bit is set; the branch
// skips the trap when aligned (+3 presumably counts halfwords — confirm).
static inline void z_assert_aligned(CodeBuffer &cbuf, int disp, Register index, Register base, int alignment) {
C2_MacroAssembler _masm(&cbuf);
__ z_lay(Z_R0, disp, index, base);
__ z_nill(Z_R0, alignment-1);
__ z_brc(Assembler::bcondEqual, +3);
__ z_illtrap();
}
// Emit a relocated call to entry_point. Returns the emitted size in bytes,
// or -1 if allocating a constant-pool slot failed (OOM is recorded on the
// compile environment).
int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, relocInfo::relocType rtype,
PhaseRegAlloc* ra_, bool is_native_call = false) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
unsigned int start_off = __ offset();
if (is_native_call) {
// Native calls are not expected through this path.
ShouldNotReachHere();
}
if (rtype == relocInfo::runtime_call_w_cp_type) {
assert((__ offset() & 2) == 0, "misaligned emit_call_reloc");
address call_addr = __ call_c_opt((address)entry_point);
if (call_addr == NULL) {
Compile::current()->env()->record_out_of_memory_failure();
return -1;
}
} else {
assert(rtype == relocInfo::none || rtype == relocInfo::opt_virtual_call_type ||
rtype == relocInfo::static_call_type, "unexpected rtype");
__ relocate(rtype);
// BRASL must be prepended with a nop to identify it in the instruction stream.
__ z_nop();
__ z_brasl(Z_R14, (address)entry_point);
}
unsigned int ret_off = __ offset();
return (ret_off - start_off);
}
// Overload taking a RelocationHolder; emits nop + BRASL and returns the
// emitted size in bytes. Only Java static/opt-virtual call relocs allowed.
static int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, RelocationHolder const& rspec) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
unsigned int start_off = __ offset();
relocInfo::relocType rtype = rspec.type();
assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type,
"unexpected rtype");
__ relocate(rspec);
// BRASL is prepended with a nop to identify it in the instruction stream.
__ z_nop();
__ z_brasl(Z_R14, (address)entry_point);
unsigned int ret_off = __ offset();
return (ret_off - start_off);
}
//=============================================================================
// The constant-table base register may come from the pointer register class.
const RegMask& MachConstantBaseNode::_out_RegMask = _Z_PTR_REG_mask;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
// The TOC base is materialized directly by emit(); no post-allocation
// expansion is needed.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
ShouldNotReachHere();
}
// Even with PC-relative TOC addressing, we still need this node.
// Float loads/stores do not support PC-relative addresses.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
C2_MacroAssembler _masm(&cbuf);
Register Rtoc = as_Register(ra_->get_encode(this));
__ load_toc(Rtoc);
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
// PCrelative TOC access.
return 6; // sizeof(LARL)
}
#if !defined(PRODUCT)
// Debug formatting: show the LARL that materializes the constant pool base.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
Register r = as_Register(ra_->get_encode(this));
st->print("LARL %s,&constant_pool # MachConstantBaseNode", r->name());
}
#endif
//=============================================================================
#include "gc/shared/barrierSetAssembler.hpp"
#if !defined(PRODUCT)
// Debug formatting mirroring MachPrologNode::emit below.
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
Compile* C = ra_->C;
st->print_cr("--- MachPrologNode ---");
st->print("\t");
for (int i = 0; i < OptoPrologueNops; i++) {
st->print_cr("NOP"); st->print("\t");
}
if (VerifyThread) {
st->print_cr("Verify_Thread");
st->print("\t");
}
long framesize = C->output()->frame_size_in_bytes();
int bangsize = C->output()->bang_size_in_bytes();
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be
// careful, because some VM calls (such as call site linkage) can
// use several kilobytes of stack. But the stack safety zone should
// account for that. See bugs 4446381, 4468289, 4497237.
if (C->output()->need_stack_bang(bangsize)) {
st->print_cr("# stack bang"); st->print("\t");
}
st->print_cr("push_frame %d", (int)-framesize);
st->print("\t");
if (C->stub_function() == NULL) {
st->print("nmethod entry barrier\n\t");
}
}
#endif
// Emit the method prologue: optional class-init barrier, stack bang,
// save return PC, push frame, set constant-table base, nmethod entry barrier.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
C2_MacroAssembler _masm(&cbuf);
__ verify_thread();
size_t framesize = C->output()->frame_size_in_bytes();
size_t bangsize = C->output()->bang_size_in_bytes();
assert(framesize % wordSize == 0, "must preserve wordSize alignment");
if (C->clinit_barrier_on_entry()) {
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
Label L_skip_barrier;
Register klass = Z_R1_scratch;
// Notify OOP recorder (don't need the relocation)
AddressLiteral md = __ constant_metadata_address(C->method()->holder()->constant_encoding());
__ load_const_optimized(klass, md.value());
// Fast path continues at L_skip_barrier; slow path tail-calls the
// wrong-method stub.
__ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
__ z_br(klass);
__ bind(L_skip_barrier);
}
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be
// careful, because some VM calls (such as call site linkage) can
// use several kilobytes of stack. But the stack safety zone should
// account for that. See bugs 4446381, 4468289, 4497237.
if (C->output()->need_stack_bang(bangsize)) {
__ generate_stack_overflow_check(bangsize);
}
assert(Immediate::is_uimm32((long)framesize), "to do: choose suitable types!");
__ save_return_pc();
// The z/Architecture abi is already accounted for in `framesize' via the
// 'out_preserve_stack_slots' declaration.
__ push_frame((unsigned int)framesize/*includes JIT ABI*/);
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
ConstantTable& constant_table = C->output()->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
if (C->stub_function() == NULL) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm);
}
C->output()->set_frame_complete(cbuf.insts_size());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
// Variable size. Determine dynamically.
return MachNode::size(ra_);
}
int MachPrologNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // One reloc entry for load_const(toc).
}
//=============================================================================
#if !defined(PRODUCT)
// Debug formatting mirroring MachEpilogNode::emit below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
os->print_cr("epilog");
os->print("\t");
if (do_polling() && ra_->C->is_method_compilation()) {
os->print_cr("load_from_polling_page Z_R1_scratch");
os->print("\t");
}
}
#endif
// Emit the method epilogue: pop frame/restore return PC, optional reserved
// stack check, and (for method compiles) the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
Compile* C = ra_->C;
__ verify_thread();
// If this does safepoint polling, then do it here.
bool need_polling = do_polling() && C->is_method_compilation();
// Pop frame, restore return_pc, and all stuff needed by interpreter.
int frame_size_in_bytes = Assembler::align((C->output()->frame_slots() << LogBytesPerInt), frame::alignment_in_bytes);
__ pop_frame_restore_retPC(frame_size_in_bytes);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check(Z_R14);
}
// Touch the polling page.
if (need_polling) {
__ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));
// We need to mark the code position where the load from the safepoint
// polling page was emitted as relocInfo::poll_return_type here.
__ relocate(relocInfo::poll_return_type);
__ load_from_polling_page(Z_R1_scratch);
}
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
// Variable size. determine dynamically.
return MachNode::size(ra_);
}
int MachEpilogNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // One for load_from_polling_page.
}
const Pipeline * MachEpilogNode::pipeline() const {
return MachNode::pipeline_class();
}
//=============================================================================
// Figure out which register class each belongs in: rc_int, rc_float, rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Map a <register>_num value (an index into the MachRegisterNumbers
// enumeration in adGlobals_s390.hpp) to its register class:
//   OptoReg::Bad -> rc_bad
//   [ 0, 32)     -> rc_int    (chunk0: 32 integer register halves)
//   [32, 64)     -> rc_float  (chunk1: 32 float register halves)
//   stack slots  -> rc_stack  (flags sit between floats and stack; asserted)
static enum RC rc_class(OptoReg::Name reg) {
  if (reg == OptoReg::Bad) {
    return rc_bad;
  }
  if (reg < 32) {
    return rc_int;    // integer register half
  }
  if (reg < 64) {
    return rc_float;  // floating-point register half
  }
  // Between float regs & stack are the flags regs.
  assert(reg >= OptoReg::stack0(), "blow up if spilling flags");
  return rc_stack;
}
// Returns size as obtained from z_emit_instr.
// Emit (or, if cbuf is NULL, just size) an SP-relative load/store for spill
// code. Opcodes above 2^32 are 6-byte long-displacement forms taking a
// signed 20-bit offset; others are 4-byte forms taking an unsigned 12-bit
// offset. The register field goes at bit 8; base register is Z_SP.
static unsigned int z_ld_st_helper(CodeBuffer *cbuf, const char *op_str, unsigned long opcode,
int reg, int offset, bool do_print, outputStream *os) {
if (cbuf) {
if (opcode > (1L<<32)) {
return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 48) |
Assembler::simm20(offset) | Assembler::reg(Z_R0, 12, 48) | Assembler::regz(Z_SP, 16, 48));
} else {
return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 32) |
Assembler::uimm12(offset, 20, 32) | Assembler::reg(Z_R0, 12, 32) | Assembler::regz(Z_SP, 16, 32));
}
}
#if !defined(PRODUCT)
if (do_print) {
os->print("%s %s,#%d[,SP]\t # MachCopy spill code",op_str, Matcher::regName[reg], offset);
}
#endif
// Sizing-only path: predict the length from the opcode width.
return (opcode > (1L << 32)) ? 6 : 4;
}
// Emit (or print) an MVC memory-to-memory copy of 'len' bytes between two
// SP-relative stack slots; MVC encodes length-1. Always 6 bytes long.
static unsigned int z_mvc_helper(CodeBuffer *cbuf, int len, int dst_off, int src_off, bool do_print, outputStream *os) {
if (cbuf) {
C2_MacroAssembler _masm(cbuf);
__ z_mvc(dst_off, len-1, Z_SP, src_off, Z_SP);
}
#if !defined(PRODUCT)
else if (do_print) {
os->print("MVC %d(%d,SP),%d(SP)\t # MachCopy spill code",dst_off, len, src_off);
}
#endif
return 6;
}
// Central worker for spill-copy emission. Dispatches on the register
// classes of source and destination (int reg, float reg, stack slot) and
// emits (cbuf != NULL), formats (os != NULL, do_size == false), or merely
// sizes (do_size == true) the required move.
// Returns the total size in bytes of the emitted code.
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *os) const {
  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  // A 64-bit value is being moved iff its high half got a register class.
  bool is64 = (src_hi_rc != rc_bad);
  assert(!is64 ||
         ((src_lo&1) == 0 && src_lo+1 == src_hi && (dst_lo&1) == 0 && dst_lo+1 == dst_hi),
         "expected aligned-adjacent pairs");

  // Generate spill code!
  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0; // Self copy, no move.
  }

  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);
  bool print = !do_size;                         // Suppress printing on pure sizing passes.
  bool src12 = Immediate::is_uimm12(src_offset); // Short-displacement form usable?
  bool dst12 = Immediate::is_uimm12(dst_offset);

  const char *mnemo = NULL;
  unsigned long opc = 0;

  // Memory->Memory Spill. Use Z_R0 to hold the value.
  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
    assert(!is64 || (src_hi_rc==rc_stack && dst_hi_rc==rc_stack),
           "expected same type of move for high parts");
    // If both offsets fit in 12 bits, a single MVC copies memory directly.
    if (src12 && dst12) {
      return z_mvc_helper(cbuf, is64 ? 8 : 4, dst_offset, src_offset, print, os);
    }
    // Otherwise bounce the value through scratch register Z_R0 (load + store).
    int r0 = Z_R0_num;
    if (is64) {
      return z_ld_st_helper(cbuf, "LG ", LG_ZOPC, r0, src_offset, print, os) +
             z_ld_st_helper(cbuf, "STG ", STG_ZOPC, r0, dst_offset, print, os);
    }
    return z_ld_st_helper(cbuf, "LY ", LY_ZOPC, r0, src_offset, print, os) +
           z_ld_st_helper(cbuf, "STY ", STY_ZOPC, r0, dst_offset, print, os);
  }

  // Check for float->int copy. Requires a trip through memory.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
    Unimplemented(); // Unsafe, do not remove!
  }

  // Check for integer reg-reg copy.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
    if (cbuf) {
      C2_MacroAssembler _masm(cbuf);
      Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
      Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
      __ z_lgr(Rdst, Rsrc);
      return 4;
    }
#if !defined(PRODUCT)
    // else
    if (print) {
      os->print("LGR %s,%s\t # MachCopy spill code", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
    }
#endif
    return 4;
  }

  // Check for integer store.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
    assert(!is64 || (src_hi_rc==rc_int && dst_hi_rc==rc_stack),
           "expected same type of move for high parts");
    if (is64) {
      return z_ld_st_helper(cbuf, "STG ", STG_ZOPC, src_lo, dst_offset, print, os);
    }
    // else: pick short or long displacement form depending on offset.
    mnemo = dst12 ? "ST " : "STY ";
    opc = dst12 ? ST_ZOPC : STY_ZOPC;
    return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
  }

  // Check for integer load
  // Always load cOops zero-extended. That doesn't hurt int loads.
  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
    assert(!is64 || (dst_hi_rc==rc_int && src_hi_rc==rc_stack),
           "expected same type of move for high parts");
    mnemo = is64 ? "LG " : "LLGF";
    opc = is64 ? LG_ZOPC : LLGF_ZOPC;
    return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
  }

  // Check for float reg-reg copy.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
    if (cbuf) {
      C2_MacroAssembler _masm(cbuf);
      FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
      FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
      __ z_ldr(Rdst, Rsrc);
      return 2;
    }
#if !defined(PRODUCT)
    // else
    if (print) {
      os->print("LDR %s,%s\t # MachCopy spill code", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
    }
#endif
    return 2;
  }

  // Check for float store.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
    assert(!is64 || (src_hi_rc==rc_float && dst_hi_rc==rc_stack),
           "expected same type of move for high parts");
    if (is64) {
      mnemo = dst12 ? "STD " : "STDY ";
      opc = dst12 ? STD_ZOPC : STDY_ZOPC;
      return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
    }
    // else: single-precision store, short or long displacement.
    mnemo = dst12 ? "STE " : "STEY ";
    opc = dst12 ? STE_ZOPC : STEY_ZOPC;
    return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
  }

  // Check for float load.
  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
    assert(!is64 || (dst_hi_rc==rc_float && src_hi_rc==rc_stack),
           "expected same type of move for high parts");
    if (is64) {
      mnemo = src12 ? "LD " : "LDY ";
      opc = src12 ? LD_ZOPC : LDY_ZOPC;
      return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
    }
    // else: single-precision load, short or long displacement.
    mnemo = src12 ? "LE " : "LEY ";
    opc = src12 ? LE_ZOPC : LEY_ZOPC;
    return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving. Only happens for misaligned
  // arguments to native calls.
  if (src_hi == dst_hi) {
    return 0; // Self copy, no move.
  }

  assert(is64 && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
  Unimplemented(); // Unsafe, do not remove!

  return 0; // never reached, but make the compiler shut up!
}
#if !defined(PRODUCT)
// Debug printout for a spill copy. Once registers are assigned, delegate
// to implementation() (which prints instead of emitting when cbuf == NULL);
// before register assignment, print the node in dataflow form.
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
  if (ra_ != NULL && ra_->node_regs_max_index() > 0) {
    // Registers known: show the actual spill instructions.
    implementation(NULL, ra_, false, os);
    return;
  }
  if (req() == 2 && in(1)) {
    // Single-input copy: simple assignment form.
    os->print("N%d = N%d\n", _idx, in(1)->_idx);
    return;
  }
  // Multi-input form: N<idx> = (N.., N.., ...).
  os->print("N%d = ", _idx);
  const char *sep = "(";
  for (uint i = 1; i < req(); ++i) {
    os->print("%sN%d", sep, in(i)->_idx);
    sep = ", ";
  }
  os->print(")");
}
#endif
// Emit the spill copy into 'cbuf' via the shared worker.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}
// Size of the spill copy in bytes; do_size == true suppresses both
// emission (cbuf == NULL) and printing.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation(NULL, ra_, true, NULL);
}
//=============================================================================
#if !defined(PRODUCT)
// Debug printout for alignment padding: _count nops of nop_size() bytes each.
void MachNopNode::format(PhaseRegAlloc *, outputStream *os) const {
  os->print("NOP # pad for alignment (%d nops, %d bytes)", _count, _count*MacroAssembler::nop_size());
}
#endif
// Emit _count nop instructions. As a diagnostic aid, report the remaining
// CodeBuffer space before and after emission whenever the buffer is close
// to full (within 2 bytes per nop plus 8 bytes of slack). The check is
// skipped during C2's scratch-emit sizing passes.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  const bool sizing_pass = ra_->C->output()->in_scratch_emit_size();

  int space_before = 0;
  bool report = false;
  if (!sizing_pass) {
    space_before = cbuf.insts()->remaining();
    report = (space_before <= _count*2 + 8);
    if (report) {
      tty->print("NopNode: _count = %3.3d, remaining space before = %d", _count, space_before);
    }
  }

  for (int i = 0; i < _count; i++) {
    __ z_nop();
  }

  if (!sizing_pass && report) {
    tty->print_cr(", after = %d", (int)cbuf.insts()->remaining());
  }
}
// Each emitted nop is 2 bytes (matches the _count*2 estimate in emit()).
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 2 * _count;
}
#if !defined(PRODUCT)
// Debug printout for a BoxLockNode: shows the SP-relative address
// materialization that emit() performs.
// NOTE: the previous version dereferenced ra_ (reg2offset) BEFORE the
// 'ra_ &&' NULL check, making the check dead and the dereference unsafe.
// The dereference now happens only after ra_ is validated.
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
  assert(ra_ != NULL, "register allocator needed to compute box offset");
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  if (ra_->node_regs_max_index() > 0) {
    // Registers already assigned: print the concrete target register.
    int reg = ra_->get_reg_first(this);
    os->print("ADDHI %s, SP, %d\t//box node", Matcher::regName[reg], offset);
  } else {
    // No register assignment yet: print in node-id form.
    os->print("ADDHI N%d = SP + %d\t// box node", _idx, offset);
  }
}
#endif
// Take care of the size function, if you make changes here!
// Materialize the stack address of the lock box in the node's assigned
// register: LAY reg, offset(Z_SP).
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  __ z_lay(as_Register(reg), offset, Z_SP);
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  // emit() produces a single LAY, which is 6 bytes.
  return 6;
}
%} // end source section
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.
#include "oops/klass.inline.hpp"
//--------------------------------------------------------------
// Used for optimization in Compile::Shorten_branches
//--------------------------------------------------------------
// Call-trampoline interface queried by Compile::Shorten_branches.
// z/Architecture needs no trampolines, so both sizes are zero.
class CallStubImpl {
 public:
  // call trampolines
  // Size of call trampoline stub. For add'l comments, see size_java_to_interp().
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // call trampolines
  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // No call trampolines on this platform.
  }
};
%} // end source_hpp section
source %{
#if !defined(PRODUCT)
// Debug printout of the unverified entry point (UEP) sequence that
// MachUEPNode::emit() produces.
void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
  os->print_cr("---- MachUEPNode ----");
  os->print_cr("\tTA");
  os->print_cr("\tload_const Z_R1, SharedRuntime::get_ic_miss_stub()");
  os->print_cr("\tBR(Z_R1)");
  os->print_cr("\tTA # pad with illtraps");
  os->print_cr("\t...");
  os->print_cr("\tTA");
  os->print_cr("\tLTGR Z_R2, Z_R2");
  os->print_cr("\tBRU ic_miss");
}
#endif
// Emit the unverified entry point: optional explicit receiver null check,
// followed by the inline-cache klass check. On mismatch (or null receiver),
// control transfers to the ic_miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  C2_MacroAssembler _masm(&cbuf);
  const int ic_miss_offset = 2;

  // Inline_cache contains a klass.
  Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
  // ARG1 is the receiver oop.
  Register R2_receiver = Z_ARG1;
  int klass_offset = oopDesc::klass_offset_in_bytes();
  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
  Register R1_ic_miss_stub_addr = Z_R1_scratch;

  // Null check of receiver.
  // This is the null check of the receiver that actually should be
  // done in the caller. It's here because in case of implicit null
  // checks we get it for free.
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "second word in oop should not require explicit null check.");
  if (!ImplicitNullChecks) {
    Label valid;
    if (VM_Version::has_CompareBranch()) {
      // Fused compare-and-branch if the CPU supports it.
      __ z_cgij(R2_receiver, 0, Assembler::bcondNotEqual, valid);
    } else {
      __ z_ltgr(R2_receiver, R2_receiver);
      __ z_bre(valid);
    }
    // The ic_miss_stub will handle the null pointer exception.
    __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
    __ z_br(R1_ic_miss_stub_addr);
    __ bind(valid);
  }

  // Check whether this method is the proper implementation for the class of
  // the receiver (ic miss check).
  {
    Label valid;
    // Compare cached class against klass from receiver.
    // This also does an implicit null check!
    __ compare_klass_ptr(ic_klass, klass_offset, R2_receiver, false);
    __ z_bre(valid);
    // The inline cache points to the wrong method. Call the
    // ic_miss_stub to find the proper method.
    __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
    __ z_br(R1_ic_miss_stub_addr);
    __ bind(valid);
  }
}
// Size of the UEP code; variable (depends on flags and CPU features),
// so measure it dynamically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  // Determine size dynamically.
  return MachNode::size(ra_);
}
//=============================================================================
%} // interrupt source section
source_hpp %{ // Header information of the source block.
// Platform hooks for exception and deopt handler stubs.
class HandlerImpl {
 public:
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Space reserved for the exception handler stub.
  static uint size_exception_handler() {
    return NativeJump::max_instruction_size();
  }

  // Size of the deopt handler stub; emit_deopt_handler() asserts an
  // exact match.
  static uint size_deopt_handler() {
    return NativeCall::max_instruction_size();
  }
};
// Platform-dependent node flags: z/Architecture adds none beyond the
// shared set.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
%} // end source_hpp section
source %{
// This exception handler code snippet is placed after the method's
// code. It is the return point if an exception occurred. it jumps to
// the exception blob.
//
// If the method gets deoptimized, the method and this code snippet
// get patched.
//
// 1) Trampoline code gets patched into the end of this exception
// handler. the trampoline code jumps to the deoptimization blob.
//
// 2) The return address in the method's code will get patched such
// that it jumps to the trampoline.
//
// 3) The handler will get patched such that it does not jump to the
// exception blob, but to an entry in the deoptimization blob being
// aware of the exception.
// Emit the exception handler stub: transfer control to the exception blob.
// Returns the offset of the handler within the stub section, or 0 if the
// CodeBuffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
  Register temp_reg = Z_R1;
  C2_MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();
  // Load the absolute address of the exception blob into Z_R1 and branch
  // via register. (NOTE(review): an earlier comment here claimed a
  // pc-relative 32-bit jump; the code actually uses load_const + BR.)
  __ load_const_optimized(temp_reg, (address)OptoRuntime::exception_blob()->content_begin());
  __ z_br(temp_reg);

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();

  return offset;
}
// Emit deopt handler code.
// Loads the deopt blob's unpack entry into Z_R1 and calls it. The stub
// must have the exact size reported by size_deopt_handler().
// Returns the offset of the handler within the stub section, or 0 if the
// CodeBuffer could not be expanded.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  C2_MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();
  // Size_deopt_handler() must be exact on zarch, so for simplicity
  // we do not use load_const_opt here.
  __ load_const(Z_R1, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1);
  assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");

  __ end_a_stub();
  return offset;
}
//=============================================================================
// Given a register encoding, produce an Integer Register object.
static Register reg_to_register_object(int register_encoding) {
  // Spot-check that the assembler's register encoding agrees with the
  // encodings generated into the ad file (via Z_R12).
  assert(Z_R12->encoding() == Z_R12_enc, "wrong coding");
  return as_Register(register_encoding);
}
// Identify extra cases that we might want to provide match rules for
// depending on the CPU features available.
const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode)) {
    return false; // no match rule present
  }

  bool supported = true; // Per default match rules are supported.
  switch (opcode) {
    case Op_ReverseBytesI:
    case Op_ReverseBytesL:
      supported = UseByteReverseInstruction;
      break;
    case Op_PopCountI:
    case Op_PopCountL:
      // PopCount supported by H/W from z/Architecture G5 (z196) on.
      supported = UsePopCountInstruction && VM_Version::has_PopCount();
      break;
    default:
      break;
  }
  return supported;
}
// Superword (auto-vectorization) support simply defers to the vector rules.
const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) {
  return match_rule_supported_vector(opcode, vlen, bt);
}
// A vector rule is supported iff the scalar rule is supported and the
// requested vector size is legal for the element type.
const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
  return match_rule_supported(opcode) && vector_size_supported(bt, vlen);
}
// Masked (predicated) vector operations are not supported on this platform.
const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
  return false;
}

// No partial-vector fixup operations are needed.
const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
  return false;
}

// No predicate (mask) register file on this platform.
const RegMask* Matcher::predicate_reg_mask(void) {
  return NULL;
}

const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) {
  return NULL;
}

// Vector calling convention not yet implemented.
const bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

// Unreachable while supports_vector_calling_convention() returns false.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}
//----------SUPERWORD HELPERS----------------------------------------

// Vector width in bytes. Fixed at 8 on this platform (see the
// MaxVectorSize assert).
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: 8-byte vectors are carried in long registers.
const uint Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8 && size == 8, "");
  return Op_RegL;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Only full-width vectors are used.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// No scalable (length-agnostic) vector registers.
const int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return -1;
}
// RETURNS: whether this branch offset is short enough that a short
// branch can be used.
//
// If the platform does not provide any short branch variants, then
// this method should return `false' for offset 0.
//
// `Compile::Fill_buffer' will decide on basis of this information
// whether to do the pass `Compile::Shorten_branches' at all.
//
// And `Compile::Shorten_branches' will decide on basis of this
// information whether to replace particular branch sites by short
// ones.
// Return whether a branch of 'offset' bytes can use the short form
// (16-bit signed pc-relative offset in halfword units); 'rule' and
// 'br_size' are unused on this platform.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // On zarch short branches use a 16 bit signed immediate that
  // is the pc-relative offset in halfword (= 2 bytes) units.
  // The range check is phrased as a distance between two addresses.
  return Assembler::is_within_range_of_RelAddr16((address)((long)offset), (address)0);
}
// Generic vector operands are not used on this platform; none of these
// hooks should ever be reached.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return NULL;
}

bool Matcher::is_reg2reg_move(MachNode* m) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}
// Constants for c2c and c calling conventions.

// Integer argument registers: R2 through R6 (and their virtual halves,
// see can_be_java_arg()).
const MachRegisterNumbers z_iarg_reg[5] = {
  Z_R2_num, Z_R3_num, Z_R4_num, Z_R5_num, Z_R6_num
};

// Floating-point argument registers: F0, F2, F4, F6.
const MachRegisterNumbers z_farg_reg[4] = {
  Z_F0_num, Z_F2_num, Z_F4_num, Z_F6_num
};

// Element counts derived from the tables above.
const int z_num_iarg_registers = sizeof(z_iarg_reg) / sizeof(z_iarg_reg[0]);

const int z_num_farg_registers = sizeof(z_farg_reg) / sizeof(z_farg_reg[0]);
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg(int reg) {
  // True for every register in z_iarg_reg[] and z_farg_reg[], including
  // their virtual halves. The halves must be included so the trampoline
  // stubs use STD/LD rather than STW/LW.
  switch (reg) {
    case Z_R2_num: case Z_R2_H_num:
    case Z_R3_num: case Z_R3_H_num:
    case Z_R4_num: case Z_R4_H_num:
    case Z_R5_num: case Z_R5_H_num:
    case Z_R6_num: case Z_R6_H_num:
    case Z_F0_num: case Z_F0_H_num:
    case Z_F2_num: case Z_F2_H_num:
    case Z_F4_num: case Z_F4_H_num:
    case Z_F6_num: case Z_F6_H_num:
      return true;
    default:
      return false;
  }
}
// Any Java argument register may be spilled.
bool Matcher::is_spillable_arg(int reg) {
  return can_be_java_arg(reg);
}
// Integer register pressure limit: default 10 unless overridden via the
// INTPRESSURE flag (-1 means "use default").
uint Matcher::int_pressure_limit()
{
  // Medium size register set, 6 special purpose regs, 3 SOE regs.
  return (INTPRESSURE == -1) ? 10 : INTPRESSURE;
}

// Float register pressure limit: default 15 unless overridden via the
// FLOATPRESSURE flag (-1 means "use default").
uint Matcher::float_pressure_limit()
{
  return (FLOATPRESSURE == -1) ? 15 : FLOATPRESSURE;
}
// Never fall back to platform assembly for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}
// Fixed result registers for the fused div/mod nodes.

// Register for DIVI projection of divmodI.
RegMask Matcher::divI_proj_mask() {
  return _Z_RARG4_INT_REG_mask;
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  return _Z_RARG3_INT_REG_mask;
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  return _Z_RARG4_LONG_REG_mask;
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  return _Z_RARG3_LONG_REG_mask;
}

// Copied from sparc. Empty mask: no SP save needed around method handle
// invokes on this platform.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return RegMask();
}
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  return false; // Never clone on this platform.
}

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  // Only base-plus-offset addresses are cloned (shared default helper).
  return clone_base_plus_offset_address(m, mstack, address_visited);
}
%} // source
//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams. Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently supported four interfaces,
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
// operand to generate a function which returns its register number when
// queried. CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried. MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried. COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding. Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
// Guaranteed-trap placeholder for mach nodes lacking a real encoding.
enc_class enc_unimplemented %{
  C2_MacroAssembler _masm(&cbuf);
  __ unimplemented("Unimplemented mach node encoding in AD file.", 13);
%}

// Debug-only marker for encodings that have never been exercised.
enc_class enc_untested %{
#ifdef ASSERT
  C2_MacroAssembler _masm(&cbuf);
  __ untested("Untested mach node encoding in AD file.");
#endif
%}
// RR format (2 bytes): opcode, R1 at bits 8-11, R2 at bits 12-15.
enc_class z_rrform(iRegI dst, iRegI src) %{
  assert((($primary >> 14) & 0x03) == 0, "Instruction format error");
  assert( ($primary >> 16) == 0, "Instruction format error");
  z_emit16(cbuf, $primary |
           Assembler::reg($dst$$reg,8,16) |
           Assembler::reg($src$$reg,12,16));
%}

// RRE format (4 bytes): extended opcode, R1 at bits 24-27, R2 at bits 28-31.
enc_class z_rreform(iRegI dst1, iRegI src2) %{
  assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst1$$reg,24,32) |
           Assembler::reg($src2$$reg,28,32));
%}

// RRF format (4 bytes): like RRE with a third register at bits 16-19.
enc_class z_rrfform(iRegI dst1, iRegI src2, iRegI src3) %{
  assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst1$$reg,24,32) |
           Assembler::reg($src2$$reg,28,32) |
           Assembler::reg($src3$$reg,16,32));
%}
// RI format (4 bytes): R1 at bits 8-11, 16-bit signed immediate.
enc_class z_riform_signed(iRegI dst, immI16 src) %{
  assert((($primary>>30) & 0x03) == 2, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst$$reg,8,32) |
           Assembler::simm16($src$$constant,16,32));
%}

// RI format (4 bytes): R1 at bits 8-11, 16-bit unsigned immediate.
enc_class z_riform_unsigned(iRegI dst, uimmI16 src) %{
  assert((($primary>>30) & 0x03) == 2, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst$$reg,8,32) |
           Assembler::uimm16($src$$constant,16,32));
%}
// RIE format (6 bytes): R1, R3, 16-bit signed immediate.
enc_class z_rieform_d(iRegI dst1, iRegI src3, immI src2) %{
  assert((($primary>>46) & 0x03) == 3, "Instruction format error");
  z_emit48(cbuf, $primary |
           Assembler::reg($dst1$$reg,8,48) |
           Assembler::reg($src3$$reg,12,48) |
           Assembler::simm16($src2$$constant,16,48));
%}

// RIL format (6 bytes): R1, 32-bit signed immediate.
enc_class z_rilform_signed(iRegI dst, immL32 src) %{
  assert((($primary>>46) & 0x03) == 3, "Instruction format error");
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::simm32($src$$constant,16,48));
%}

// RIL format (6 bytes): R1, 32-bit unsigned immediate.
enc_class z_rilform_unsigned(iRegI dst, uimmL32 src) %{
  assert((($primary>>46) & 0x03) == 3, "Instruction format error");
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::uimm32($src$$constant,16,48));
%}
// RSY format (6 bytes): R1, R3, plus a 20-bit signed displacement; here
// the displacement carries a constant (e.g. shift amount).
enc_class z_rsyform_const(iRegI dst, iRegI src1, immI src2) %{
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::reg($src1$$reg,12,48) |
           Assembler::simm20($src2$$constant));
%}

// RSY format (6 bytes): amount taken from a register, displacement 0.
enc_class z_rsyform_reg_reg(iRegI dst, iRegI src, iRegI shft) %{
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::reg($src$$reg,12,48) |
           Assembler::reg($shft$$reg,16,48) |
           Assembler::simm20(0));
%}
// RX format (4 bytes): R1, index, base, 12-bit unsigned displacement.
enc_class z_rxform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
  assert((($primary>>30) & 0x03) == 1, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst$$reg,8,32) |
           Assembler::reg($src1$$reg,12,32) |
           Assembler::reg($src2$$reg,16,32) |
           Assembler::uimm12($con$$constant,20,32));
%}

// RX format (4 bytes): base register only, no index.
enc_class z_rxform_imm_reg(iRegL dst, immL con, iRegL src) %{
  assert((($primary>>30) & 0x03) == 1, "Instruction format error");
  z_emit32(cbuf, $primary |
           Assembler::reg($dst$$reg,8,32) |
           Assembler::reg($src$$reg,16,32) |
           Assembler::uimm12($con$$constant,20,32));
%}

// RXY format (6 bytes): like RX but with a 20-bit signed displacement.
enc_class z_rxyform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::reg($src1$$reg,12,48) |
           Assembler::reg($src2$$reg,16,48) |
           Assembler::simm20($con$$constant));
%}

// RXY format (6 bytes): base register only, no index.
enc_class z_rxyform_imm_reg(iRegL dst, immL con, iRegL src) %{
  z_emit48(cbuf, $primary |
           Assembler::reg($dst$$reg,8,48) |
           Assembler::reg($src$$reg,16,48) |
           Assembler::simm20($con$$constant));
%}
// Direct memory arithmetic.

// SIY format (6 bytes): base + 20-bit signed displacement, 8-bit signed
// immediate operand. Requires the memory-with-immediate-ALU-ops facility.
enc_class z_siyform(memoryRSY mem, immI8 src) %{
  int      disp = $mem$$disp;
  Register base = reg_to_register_object($mem$$base);
  int      con  = $src$$constant;

  assert(VM_Version::has_MemWithImmALUOps(), "unsupported CPU");
  z_emit_inst(cbuf, $primary |
              Assembler::regz(base,16,48) |
              Assembler::simm20(disp) |
              Assembler::simm8(con,8,48));
%}

// SIL format (6 bytes): base + 12-bit unsigned displacement, 16-bit signed
// immediate operand.
enc_class z_silform(memoryRS mem, immI16 src) %{
  z_emit_inst(cbuf, $primary |
              Assembler::regz(reg_to_register_object($mem$$base),16,48) |
              Assembler::uimm12($mem$$disp,20,48) |
              Assembler::simm16($src$$constant,32,48));
%}
// Encoder for FP ALU reg/mem instructions (support only short displacements).
// Opcodes wider than 32 bits select the 6-byte layout; note that BOTH
// paths encode a 12-bit unsigned displacement (short form only).
enc_class z_form_rt_memFP(RegF dst, memoryRX mem) %{
  Register Ridx = $mem$$index$$Register;
  if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
  if ($primary > (1L << 32)) {
    z_emit_inst(cbuf, $primary |
                Assembler::reg($dst$$reg, 8, 48) |
                Assembler::uimm12($mem$$disp, 20, 48) |
                Assembler::reg(Ridx, 12, 48) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
  } else {
    z_emit_inst(cbuf, $primary |
                Assembler::reg($dst$$reg, 8, 32) |
                Assembler::uimm12($mem$$disp, 20, 32) |
                Assembler::reg(Ridx, 12, 32) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, 32));
  }
%}
// Encoder for integer reg/mem instructions. Opcodes wider than 32 bits
// use the 6-byte layout with a 20-bit signed displacement; the 4-byte
// layout carries a 12-bit unsigned displacement.
enc_class z_form_rt_mem(iRegI dst, memory mem) %{
  Register Ridx = $mem$$index$$Register;
  if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
  if ($primary > (1L<<32)) {
    z_emit_inst(cbuf, $primary |
                Assembler::reg($dst$$reg, 8, 48) |
                Assembler::simm20($mem$$disp) |
                Assembler::reg(Ridx, 12, 48) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
  } else {
    z_emit_inst(cbuf, $primary |
                Assembler::reg($dst$$reg, 8, 32) |
                Assembler::uimm12($mem$$disp, 20, 32) |
                Assembler::reg(Ridx, 12, 32) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, 32));
  }
%}
// Encoder that picks the best encoding for the actual displacement:
// - short (12-bit) displacement: $secondary opcode, natural size;
// - valid 20-bit displacement:   $primary (long-displacement) opcode;
// - out-of-range displacement:   materialize it in Z_R1_scratch (adding
//   any index register) and use Z_R1 as the index with displacement 0.
enc_class z_form_rt_mem_opt(iRegI dst, memory mem) %{
  int isize = $secondary > 1L << 32 ? 48 : 32; // Instruction size of $secondary in bits.
  Register Ridx = $mem$$index$$Register;
  if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.

  if (Displacement::is_shortDisp((long)$mem$$disp)) {
    z_emit_inst(cbuf, $secondary |
                Assembler::reg($dst$$reg, 8, isize) |
                Assembler::uimm12($mem$$disp, 20, isize) |
                Assembler::reg(Ridx, 12, isize) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, isize));
  } else if (Displacement::is_validDisp((long)$mem$$disp)) {
    z_emit_inst(cbuf, $primary |
                Assembler::reg($dst$$reg, 8, 48) |
                Assembler::simm20($mem$$disp) |
                Assembler::reg(Ridx, 12, 48) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
  } else {
    C2_MacroAssembler _masm(&cbuf);
    __ load_const_optimized(Z_R1_scratch, $mem$$disp);
    if (Ridx != Z_R0) { __ z_agr(Z_R1_scratch, Ridx); } // Fold original index into Z_R1.
    z_emit_inst(cbuf, $secondary |
                Assembler::reg($dst$$reg, 8, isize) |
                Assembler::uimm12(0, 20, isize) |
                Assembler::reg(Z_R1_scratch, 12, isize) |
                Assembler::regz(reg_to_register_object($mem$$base), 16, isize));
  }
%}
// Unconditional relative branch, long form (BRUL).
enc_class z_enc_brul(Label lbl) %{
  C2_MacroAssembler _masm(&cbuf);
  Label* p = $lbl$$label;

  // 'p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  // Use a bound dummy label in that case.
  Label d;
  __ bind(d);
  Label& l = (NULL == p) ? d : *(p);
  __ z_brul(l);
%}

// Unconditional relative branch, short form (BRU).
enc_class z_enc_bru(Label lbl) %{
  C2_MacroAssembler _masm(&cbuf);
  Label* p = $lbl$$label;

  // 'p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  // Use a bound dummy label in that case.
  Label d;
  __ bind(d);
  Label& l = (NULL == p) ? d : *(p);
  __ z_bru(l);
%}
// Conditional far branch (BRCL) using the condition code from the
// cmpOp operand.
enc_class z_enc_branch_con_far(cmpOp cmp, Label lbl) %{
  C2_MacroAssembler _masm(&cbuf);
  Label* p = $lbl$$label;

  // 'p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  // Use a bound dummy label in that case.
  Label d;
  __ bind(d);
  Label& l = (NULL == p) ? d : *(p);
  __ z_brcl((Assembler::branch_condition)$cmp$$cmpcode, l);
%}
enc_class z_enc_branch_con_short(cmpOp cmp, Label lbl) %{
--> --------------------
--> maximum size reached
--> --------------------
[ Original von:0.380Diese Quellcodebibliothek enthält Beispiele in vielen Programmiersprachen.
Man kann per Verzeichnistruktur darin navigieren.
Der Code wird farblich markiert angezeigt.
]
|
|
|
|
|