/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.
class MacroAssembler: public Assembler { friendclass LIR_Assembler; friendclass Runtime1; // as_Address()
public: // Support for VM calls // // This is the base routine called by the different versions of call_VM_leaf. The interpreter // may customize this version by overriding it for its purposes (e.g., to save/restore // additional registers when doing a VM call).
virtualvoid call_VM_leaf_base(
address entry_point, // the entry point int number_of_arguments // the number of arguments to pop after the call
);
protected: // This is the base routine called by the different versions of call_VM. The interpreter // may customize this version by overriding it for its purposes (e.g., to save/restore // additional registers when doing a VM call). // // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base // returns the register which contains the thread upon return. If a thread register has been // specified, the return value will correspond to that register. If no last_java_sp is specified // (noreg) than rsp will be used instead. virtualvoid call_VM_base( // returns the register containing the thread upon return Register oop_result, // where an oop-result ends up if any; use noreg otherwise Register java_thread, // the thread if computed before ; use noreg otherwise Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
address entry_point, // the entry point int number_of_arguments, // the number of arguments (w/o thread) to pop after the call bool check_exceptions // whether to check for pending exceptions after return
);
// helpers for FPU flag access // tmp is a temporary register, if none is available use noreg void save_rax (Register tmp); void restore_rax(Register tmp);
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. // The implementation is only non-empty for the InterpreterMacroAssembler, // as only the interpreter handles PopFrame and ForceEarlyReturn requests. virtualvoid check_and_handle_popframe(Register java_thread); virtualvoid check_and_handle_earlyret(Register java_thread);
// Support for NULL-checks // // Generates code that causes a NULL OS exception if the content of reg is NULL. // If the accessed location is M[reg + offset] and the offset is known, provide the // offset. No explicit code generation is needed if the offset is within a certain // range (0 <= offset <= page_size).
// The following 4 methods return the offset of the appropriate move instruction
// Support for fast byte/short loading with zero extension (depending on particular CPU) int load_unsigned_byte(Register dst, Address src); int load_unsigned_short(Register dst, Address src);
// Support for fast byte/short loading with sign extension (depending on particular CPU) int load_signed_byte(Register dst, Address src); int load_signed_short(Register dst, Address src);
// Support for sign-extension (hi:lo = extend_sign(lo)) void extend_sign(Register hi, Register lo);
// Load and store values by size and signed-ness void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
// Support for inc/dec with optimal instruction selection depending on value
void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
void decrementl(Address dst, int value = 1); void decrementl(Register reg, int value = 1);
void decrementq(Register reg, int value = 1); void decrementq(Address dst, int value = 1);
void incrementl(Address dst, int value = 1); void incrementl(Register reg, int value = 1);
void incrementq(Register reg, int value = 1); void incrementq(Address dst, int value = 1);
// Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) // The pointer will be loaded into the thread register. void get_thread(Register thread);
#ifdef _LP64 // Support for argument shuffling
// bias in bytes void move32_64(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0); void long_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0); void float_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0); void double_move(VMRegPair src, VMRegPair dst, Register tmp = rax, int in_stk_bias = 0, int out_stk_bias = 0); void move_ptr(VMRegPair src, VMRegPair dst); void object_move(OopMap* map, int oop_handle_offset, int framesize_in_slots,
VMRegPair src,
VMRegPair dst, bool is_receiver, int* receiver_offset); #endif// _LP64
// Support for VM calls // // It is imperative that all calls into the VM are handled via the call_VM macros. // They make sure that the stack linkage is setup correctly. call_VM's correspond // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
// This dummy is to prevent a call to store_heap_oop from // converting a zero (like NULL) into a Register by giving // the compiler two choices it can't resolve
// if heap base register is used - reinit it with the correct value void reinit_heapbase();
DEBUG_ONLY(void verify_heapbase(constchar* msg);)
#endif// _LP64
// Int division/remainder for Java // (as idivl, but checks for special case as described in JVM spec.) // returns idivl instruction offset for implicit exception handling int corrected_idivl(Register reg);
// Long division/remainder for Java // (as idivq, but checks for special case as described in JVM spec.) // returns idivq instruction offset for implicit exception handling int corrected_idivq(Register reg);
void int3();
// Long operation macros for a 32bit cpu // Long negation for Java void lneg(Register hi, Register lo);
// Long multiplication for Java // (destroys contents of eax, ebx, ecx and edx) void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
// Long shifts for Java // (semantics as described in JVM spec.) void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f)
// Long compare for Java // (semantics as described in JVM spec.) void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
// Division by power of 2, rounding towards 0 void division_with_shift(Register reg, int shift_value);
#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !LP64
// dst = c = a * b + c void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c); void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len); void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len); void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
// same as fcmp2int, but using SSE2 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
// branch to L if FPU flag C2 is set/not set // tmp is a temporary register, if none is available use noreg void jC2 (Register tmp, Label& L); void jnC2(Register tmp, Label& L);
// Load float value from 'address'. If UseSSE >= 1, the value is loaded into // register xmm0. Otherwise, the value is loaded onto the FPU stack. void load_float(Address src);
// Store float value to 'address'. If UseSSE >= 1, the value is stored // from register xmm0. Otherwise, the value is stored from the FPU stack. void store_float(Address dst);
// Load double value from 'address'. If UseSSE >= 2, the value is loaded into // register xmm0. Otherwise, the value is loaded onto the FPU stack. void load_double(Address src);
// Store double value to 'address'. If UseSSE >= 2, the value is stored // from register xmm0. Otherwise, the value is stored from the FPU stack. void store_double(Address dst);
#ifndef _LP64 // Pop ST (ffree & fincstp combined) void fpop();
// Round up to a power of two void round_to(Register reg, int modulus);
private: // General purpose and XMM registers potentially clobbered by native code; there // is no need for FPU or AVX opmask related methods because C1/interpreter // - we save/restore FPU state as a whole always // - do not care about AVX-512 opmask static RegSet call_clobbered_gp_registers(); static XMMRegSet call_clobbered_xmm_registers();
void push_set(XMMRegSet set, int offset); void pop_set(XMMRegSet set, int offset);
public: void push_set(RegSet set, int offset = -1); void pop_set(RegSet set, int offset = -1);
// Push and pop everything that might be clobbered by a native // runtime call. // Only save the lower 64 bits of each vector register. // Additional registers can be excluded in a passed RegSet. void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true); void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
// allocation void tlab_allocate( Register thread, // Current thread Register obj, // result: pointer to object after successful allocation Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise int con_size_in_bytes, // object size in bytes if known at compile time Register t1, // temp register Register t2, // temp register
Label& slow_case // continuation point if fast allocation fails
); void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow. // One of the three labels can be NULL, meaning take the fall-through. // If super_check_offset is -1, the value is loaded up from super_klass. // No registers are killed, except temp_reg. void check_klass_subtype_fast_path(Register sub_klass, Register super_klass, Register temp_reg,
Label* L_success,
Label* L_failure,
Label* L_slow_path,
RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
// The rest of the type check; must be wired to a corresponding fast path. // It does not repeat the fast path logic, so don't use it standalone. // The temp_reg and temp2_reg can be noreg, if no temps are available. // Updates the sub's secondary super cache as necessary. // If set_cond_codes, condition codes will be Z on success, NZ on failure. void check_klass_subtype_slow_path(Register sub_klass, Register super_klass, Register temp_reg, Register temp2_reg,
Label* L_success,
Label* L_failure, bool set_cond_codes = false);
// Simplified, combined version, good for typical uses. // Falls through on failure. void check_klass_subtype(Register sub_klass, Register super_klass, Register temp_reg,
Label& L_success);
// Verify or restore cpu control state after JNI call void restore_cpu_control_state_after_jni(Register rscratch);
// prints msg, dumps registers and stops execution void stop(constchar* msg);
// prints msg and continues void warn(constchar* msg);
// dumps registers and other state void print_state();
staticvoid debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); staticvoid debug64(char* msg, int64_t pc, int64_t regs[]); staticvoid print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip); staticvoid print_state64(int64_t pc, int64_t regs[]);
void os_breakpoint();
void untested() { stop("untested"); }
void unimplemented(constchar* what = "");
void should_not_reach_here() { stop("should not reach here"); }
// Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit // operands. In general the names are modified to avoid hiding the instruction in Assembler // so that we don't need to implement all the varieties in the Assembler with trivial wrappers // here in MacroAssembler. The major exception to this rule is call
// Import other testl() methods from the parent class or else // they will be hidden by the following overriding declaration. using Assembler::testl; void testl(Address dst, int32_t imm32); void testl(Register dst, int32_t imm32); void testl(Register dst, AddressLiteral src); // requires reachable address using Assembler::testq; void testq(Address dst, int32_t imm32); void testq(Register dst, int32_t imm32);
// NOTE: this call transfers to the effective address of entry NOT // the address contained by entry. This is because this is more natural // for jumps/calls. void call(AddressLiteral entry, Register rscratch = rax);
// NOTE: these jumps transfer to the effective address of dst NOT // the address contained by dst. This is because this is more natural // for jumps/calls. void jump(AddressLiteral dst, Register rscratch = noreg);
// 32bit can do a case table jump in one instruction but we no longer allow the base // to be installed in the Address class. This jump will transfer to the address // contained in the location described by entry (not the address of entry) void jump(ArrayAddress entry, Register rscratch);
void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len); void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
--> --------------------
--> maximum size reached
--> --------------------
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.72Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.