/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// These masks are used to provide 128-bit aligned bitmasks to the XMM // instructions, to allow sign-masking or sign-bit flipping. They allow // fast versions of NegF/NegD and AbsF/AbsD.
// Note: 'double' and 'long long' have 32-bits alignment on x86.
static jlong* double_quadword(jlong* adr, jlong lo, jlong hi) {
  // Round the address down to a 16-byte boundary so the resulting
  // 128-bit operand is suitably aligned for SSE instructions.
  const intptr_t align_mask = (intptr_t)(~0xF);
  jlong* aligned = (jlong*)(((intptr_t)adr) & align_mask);
  // Store both 64-bit halves of the 128-bit operand.
  aligned[0] = lo;
  aligned[1] = hi;
  return aligned;
}
// Buffer for 128-bits masks used by SSE instructions. static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
NEEDS_CLEANUP // remove this definitions ? constRegister IC_Klass = rax; // where the IC klass is cached constRegister SYNC_header = rax; // synchronization header constRegister SHIFT_count = rcx; // where count for shift operations must be
// we jump here if osr happens with the interpreter // state set up to continue at the beginning of the // loop that triggered osr - in particular, we have // the following registers setup: // // rcx: osr buffer //
// build frame
ciMethod* m = compilation()->method();
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is // // locals[nlocals-1..0] // monitors[0..number_of_locks] // // locals is a direct copy of the interpreter frame so in the osr buffer // so first slot in the local array is the last local from the interpreter // and last slot is local[0] (receiver) from the interpreter // // Similarly with locks. The first lock slot in the osr buffer is the nth lock // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock // in the interpreter frame (the method lock if a sync method)
// Initialize monitors in the compiled activation. // rcx: pointer to osr buffer // // All other registers are dead at this point and the locals will be // copied into place by code emitted in the IR.
Register OSR_buf = osrBufferPointer()->as_pointer_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1); // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in // the OSR buffer using 2 word entries: first the lock and then // the oop. for (int i = 0; i < number_of_locks; i++) { int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); #ifdef ASSERT // verify the interpreter's monitor has a non-null object
{
Label L;
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ bind(L);
} #endif
__ movptr(rbx, Address(OSR_buf, slot_offset + 0));
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
}
}
}
// inline cache check; done before the frame is built. int LIR_Assembler::check_icache() { Register receiver = FrameMap::receiver_opr->as_register(); Register ic_klass = IC_Klass; constint ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); constbool do_post_padding = VerifyOops || UseCompressedClassPointers; if (!do_post_padding) { // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
__ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
} int offset = __ offset();
__ inline_cache_check(receiver, IC_Klass);
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct"); if (do_post_padding) { // force alignment after the cache check. // It's been verified to be aligned if !VerifyOops
__ align(CodeEntryAlignment);
} return offset;
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
assert(!method->holder()->is_not_initialized(), "initialization should have been started");
Label L_skip_barrier; Register klass = rscratch1; Register thread = LP64_ONLY( r15_thread ) NOT_LP64( noreg );
assert(thread != noreg, "x86_32 not implemented");
// This specifies the rsp decrement needed to build the frame int LIR_Assembler::initial_frame_size_in_bytes() const { // if rounding, must let FrameMap know!
// The frame_map records size in slots (32bit word)
// subtract two words to account for return address and link return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
int LIR_Assembler::emit_exception_handler() { // generate code for exception handler
address handler_base = __ start_a_stub(exception_handler_size()); if (handler_base == NULL) { // not enough space left for the handler
bailout("exception handler overflow"); return -1;
}
int offset = code_offset();
// the exception oop and pc are in rax, and rdx // no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(rax);
// Emit the unwind handler: fetch the pending exception from the thread,
// clear the thread's exception state, remove the activation and jump to
// the shared unwind_exception runtime stub. Returns the code offset at
// which the handler starts.
// NOTE(review): this block looks truncated relative to upstream — the
// synchronized-method unlock / dtrace probe path between the two
// preserve/restore moves is missing, and 'stub' near the end is never
// declared in the visible code; confirm against the original
// c1_LIRAssembler_x86.cpp before relying on it.
// Emit the code to remove the frame from the stack in the exception // unwind path. int LIR_Assembler::emit_unwind_handler() { #ifndef PRODUCT if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
} #endif
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
__ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
__ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
__ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(rax); if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mov(rax, rbx); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
__ remove_frame(initial_frame_size_in_bytes());
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// NOTE(review): in the collapsed line below, "if (stub != NULL) {" has been
// swallowed into the comment by the extraction — the brace structure here is
// suspect.
// Emit the slow path assembly if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
int LIR_Assembler::emit_deopt_handler() { // generate code for exception handler
address handler_base = __ start_a_stub(deopt_handler_size()); if (handler_base == NULL) { // not enough space left for the handler
bailout("deopt handler overflow"); return -1;
}
int offset = code_offset();
InternalAddress here(__ pc());
// Store a constant (LIR_Const) into a stack slot of the current frame.
// src  - must be a constant operand; dest - must be a stack operand
// (single slot for 32-bit values, double slot for long/double).
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
assert(src->is_constant(), "should not call otherwise");
assert(dest->is_stack(), "should not call otherwise");
LIR_Const* c = src->as_constant_ptr();
// Dispatch on the constant's type: 32-bit bit patterns, pointer-sized
// values, oops and 64-bit values each need a different store form.
switch (c->type()) { case T_INT: // fall through case T_FLOAT:
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); break;
case T_ADDRESS:
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); break;
case T_OBJECT:
// movoop may need rscratch1 as a temporary for the oop constant.
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject(), rscratch1); break;
// 64-bit values: a single movptr on LP64, two 32-bit half-stores on x86_32.
case T_LONG: // fall through case T_DOUBLE: #ifdef _LP64
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
lo_word_offset_in_bytes),
(intptr_t)c->as_jlong_bits(),
rscratch1); #else
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
lo_word_offset_in_bytes), c->as_jint_lo_bits());
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
hi_word_offset_in_bytes), c->as_jint_hi_bits()); #endif// _LP64 break;
default:
ShouldNotReachHere();
}
}
// Store a constant (LIR_Const) directly into a memory location.
// NOTE(review): this block appears to be a corrupted splice — the switch is
// missing most of its cases and its closing brace, and the trailing
// "elseif" (sic) branch about fpu-register moves belongs to a different
// function (register-to-register moves); compare with the upstream
// c1_LIRAssembler_x86.cpp before relying on it.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
assert(src->is_constant(), "should not call otherwise");
assert(dest->is_address(), "should not call otherwise");
LIR_Const* c = src->as_constant_ptr();
LIR_Address* addr = dest->as_address_ptr();
// Presumably records the pc for an implicit null check at the store
// below — TODO confirm; 'null_check_here' is not used in the visible code.
int null_check_here = code_offset(); switch (type) { case T_INT: // fall through case T_FLOAT:
__ movl(as_Address(addr), c->as_jint_bits()); break;
case T_ADDRESS:
__ movptr(as_Address(addr), c->as_jint_bits()); break;
#ifndef _LP64 // move between fpu-registers (no instruction necessary because of fpu-stack)
} elseif (dest->is_single_fpu() || dest->is_double_fpu()) {
assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
assert(src->fpu() == dest->fpu(), "currently should be nothing to do"); #endif// !_LP64
} else {
ShouldNotReachHere();
}
}
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
assert(src->is_register(), "should not call otherwise");
assert(dest->is_stack(), "should not call otherwise");
if (is_reference_type(type)) {
__ verify_oop(src->as_register()); #ifdef _LP64 if (UseCompressedOops && !wide) {
__ movptr(compressed_src, src->as_register());
__ encode_heap_oop(compressed_src); if (patch_code != lir_patch_none) {
info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
}
} #endif
}
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
Address toa = as_Address(to_addr);
assert(toa.disp() != 0, "must have");
}
int null_check_here = code_offset(); switch (type) { case T_FLOAT: { #ifdef _LP64
assert(src->is_single_xmm(), "not a float");
__ movflt(as_Address(to_addr), src->as_xmm_float_reg()); #else if (src->is_single_xmm()) {
__ movflt(as_Address(to_addr), src->as_xmm_float_reg());
} else {
assert(src->is_single_fpu(), "must be");
assert(src->fpu_regnr() == 0, "argument must be on TOS"); if (pop_fpu_stack) __ fstp_s(as_Address(to_addr)); else __ fst_s (as_Address(to_addr));
} #endif// _LP64 break;
}
case T_DOUBLE: { #ifdef _LP64
assert(src->is_double_xmm(), "not a double");
__ movdbl(as_Address(to_addr), src->as_xmm_double_reg()); #else if (src->is_double_xmm()) {
__ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
} else {
assert(src->is_double_fpu(), "must be");
assert(src->fpu_regnrLo() == 0, "argument must be on TOS"); if (pop_fpu_stack) __ fstp_d(as_Address(to_addr)); else __ fst_d (as_Address(to_addr));
} #endif// _LP64 break;
}
case T_ARRAY: // fall through case T_OBJECT: // fall through if (UseCompressedOops && !wide) {
__ movl(as_Address(to_addr), compressed_src);
} else {
__ movptr(as_Address(to_addr), src->as_register());
} break; case T_METADATA: // We get here to store a method pointer to the stack to pass to // a dtrace runtime call. This can't work on 64 bit with // compressed klass ptrs: T_METADATA can be a compressed klass // ptr or a 64 bit method pointer.
LP64_ONLY(ShouldNotReachHere());
__ movptr(as_Address(to_addr), src->as_register()); break; case T_ADDRESS:
__ movptr(as_Address(to_addr), src->as_register()); break; case T_INT:
__ movl(as_Address(to_addr), src->as_register()); break;
case T_LONG: { Register from_lo = src->as_register_lo(); Register from_hi = src->as_register_hi(); #ifdef _LP64
__ movptr(as_Address_lo(to_addr), from_lo); #else Register base = to_addr->base()->as_register(); Register index = noreg; if (to_addr->index()->is_register()) {
index = to_addr->index()->as_register();
} if (base == from_lo || index == from_lo) {
assert(base != from_hi, "can't be");
assert(index == noreg || (index != base && index != from_hi), "can't handle this");
__ movl(as_Address_hi(to_addr), from_hi); if (patch != NULL) {
patching_epilog(patch, lir_patch_high, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
}
__ movl(as_Address_lo(to_addr), from_lo);
} else {
assert(index == noreg || (index != base && index != from_lo), "can't handle this");
__ movl(as_Address_lo(to_addr), from_lo); if (patch != NULL) {
patching_epilog(patch, lir_patch_low, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ movl(as_Address_hi(to_addr), from_hi);
} #endif// _LP64 break;
}
case T_BYTE: // fall through case T_BOOLEAN: { Register src_reg = src->as_register();
Address dst_addr = as_Address(to_addr);
assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
__ movb(dst_addr, src_reg); break;
}
case T_CHAR: // fall through case T_SHORT:
__ movw(as_Address(to_addr), src->as_register()); break;
if (addr->base()->type() == T_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
}
switch (type) { case T_BOOLEAN: // fall through case T_BYTE: // fall through case T_CHAR: // fall through case T_SHORT: if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) { // on pre P6 processors we may get partial register stalls // so blow away the value of to_rinfo before loading a // partial word into it. Do it here so that it precedes // the potential patch point below.
__ xorptr(dest->as_register(), dest->as_register());
} break; default: break;
}
PatchingStub* patch = NULL; if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
assert(from_addr.disp() != 0, "must have");
} if (info != NULL) {
add_debug_info_for_null_check_here(info);
}
switch (type) { case T_FLOAT: { if (dest->is_single_xmm()) {
__ movflt(dest->as_xmm_float_reg(), from_addr);
} else { #ifndef _LP64
assert(dest->is_single_fpu(), "must be");
assert(dest->fpu_regnr() == 0, "dest must be TOS");
__ fld_s(from_addr); #else
ShouldNotReachHere(); #endif// !LP64
} break;
}
case T_DOUBLE: { if (dest->is_double_xmm()) {
__ movdbl(dest->as_xmm_double_reg(), from_addr);
} else { #ifndef _LP64
assert(dest->is_double_fpu(), "must be");
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
__ fld_d(from_addr); #else
ShouldNotReachHere(); #endif// !LP64
} break;
}
case T_OBJECT: // fall through case T_ARRAY: // fall through if (UseCompressedOops && !wide) {
__ movl(dest->as_register(), from_addr);
} else {
__ movptr(dest->as_register(), from_addr);
} break;
case T_ADDRESS:
__ movptr(dest->as_register(), from_addr); break; case T_INT:
__ movl(dest->as_register(), from_addr); break;
case T_LONG: { Register to_lo = dest->as_register_lo(); Register to_hi = dest->as_register_hi(); #ifdef _LP64
__ movptr(to_lo, as_Address_lo(addr)); #else Register base = addr->base()->as_register(); Register index = noreg; if (addr->index()->is_register()) {
index = addr->index()->as_register();
} if ((base == to_lo && index == to_hi) ||
(base == to_hi && index == to_lo)) { // addresses with 2 registers are only formed as a result of // array access so this code will never have to deal with // patches or null checks.
assert(info == NULL && patch == NULL, "must be");
__ lea(to_hi, as_Address(addr));
__ movl(to_lo, Address(to_hi, 0));
__ movl(to_hi, Address(to_hi, BytesPerWord));
} elseif (base == to_lo || index == to_lo) {
assert(base != to_hi, "can't be");
assert(index == noreg || (index != base && index != to_hi), "can't handle this");
__ movl(to_hi, as_Address_hi(addr)); if (patch != NULL) {
patching_epilog(patch, lir_patch_high, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_low;
}
__ movl(to_lo, as_Address_lo(addr));
} else {
assert(index == noreg || (index != base && index != to_lo), "can't handle this");
__ movl(to_lo, as_Address_lo(addr)); if (patch != NULL) {
patching_epilog(patch, lir_patch_low, base, info);
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
patch_code = lir_patch_high;
}
__ movl(to_hi, as_Address_hi(addr));
} #endif// _LP64 break;
}
case T_BOOLEAN: // fall through case T_BYTE: { Register dest_reg = dest->as_register();
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
__ movsbl(dest_reg, from_addr);
} else {
__ movb(dest_reg, from_addr);
__ shll(dest_reg, 24);
__ sarl(dest_reg, 24);
} break;
}
case T_CHAR: { Register dest_reg = dest->as_register();
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6"); if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
__ movzwl(dest_reg, from_addr);
} else {
__ movw(dest_reg, from_addr);
} break;
}
if (patch != NULL) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
if (is_reference_type(type)) { #ifdef _LP64 if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
} #endif
// Load barrier has not yet been applied, so ZGC can't verify the oop here if (!UseZGC) {
__ verify_oop(dest->as_register());
}
}
}
NEEDS_CLEANUP; // This could be static?
// Map a basic type's element size in bytes to the matching x86
// addressing-mode scale factor.
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  switch (type2aelembytes(type)) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
    default:
      ShouldNotReachHere();
      return Address::no_scale;
  }
}
// Dispatch a three-operand LIR instruction to the matching machine sequence.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      // Integer division and remainder share one helper; it also receives
      // the debug info (op->info()) for the division.
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      // Fused multiply-add, double precision, in XMM registers.
      __ fmad(op->result_opr()->as_xmm_double_reg(),
              op->in_opr1()->as_xmm_double_reg(),
              op->in_opr2()->as_xmm_double_reg(),
              op->in_opr3()->as_xmm_double_reg());
      break;
    case lir_fmaf:
      // Fused multiply-add, single precision, in XMM registers.
      __ fmaf(op->result_opr()->as_xmm_float_reg(),
              op->in_opr1()->as_xmm_float_reg(),
              op->in_opr2()->as_xmm_float_reg(),
              op->in_opr3()->as_xmm_float_reg());
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
case Bytecodes::_i2b:
move_regs(src->as_register(), dest->as_register());
__ sign_extend_byte(dest->as_register()); break;
case Bytecodes::_i2c:
move_regs(src->as_register(), dest->as_register());
__ andl(dest->as_register(), 0xFFFF); break;
case Bytecodes::_i2s:
move_regs(src->as_register(), dest->as_register());
__ sign_extend_short(dest->as_register()); break;
#ifdef _LP64 case Bytecodes::_f2d:
__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg()); break;
case Bytecodes::_d2f:
__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg()); break;
case Bytecodes::_i2f:
__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register()); break;
case Bytecodes::_i2d:
__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register()); break;
case Bytecodes::_l2f:
__ cvtsi2ssq(dest->as_xmm_float_reg(), src->as_register_lo()); break;
case Bytecodes::_l2d:
__ cvtsi2sdq(dest->as_xmm_double_reg(), src->as_register_lo()); break;
case Bytecodes::_f2i:
__ convert_f2i(dest->as_register(), src->as_xmm_float_reg()); break;
case Bytecodes::_d2i:
__ convert_d2i(dest->as_register(), src->as_xmm_double_reg()); break;
case Bytecodes::_f2l:
__ convert_f2l(dest->as_register_lo(), src->as_xmm_float_reg()); break;
case Bytecodes::_d2l:
__ convert_d2l(dest->as_register_lo(), src->as_xmm_double_reg()); break; #else case Bytecodes::_f2d: case Bytecodes::_d2f: if (dest->is_single_xmm()) {
__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
} elseif (dest->is_double_xmm()) {
__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
} else {
assert(src->fpu() == dest->fpu(), "register must be equal"); // do nothing (float result is rounded later through spilling)
} break;
case Bytecodes::_i2f: case Bytecodes::_i2d: if (dest->is_single_xmm()) {
__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
} elseif (dest->is_double_xmm()) {
__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
} else {
assert(dest->fpu() == 0, "result must be on TOS");
__ movl(Address(rsp, 0), src->as_register());
__ fild_s(Address(rsp, 0));
} break;
case Bytecodes::_l2f: case Bytecodes::_l2d:
assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
assert(dest->fpu() == 0, "result must be on TOS");
__ movptr(Address(rsp, 0), src->as_register_lo());
__ movl(Address(rsp, BytesPerWord), src->as_register_hi());
__ fild_d(Address(rsp, 0)); // float result is rounded later through spilling break;
case Bytecodes::_f2i: case Bytecodes::_d2i: if (src->is_single_xmm()) {
__ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
} elseif (src->is_double_xmm()) {
__ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
} else {
assert(src->fpu() == 0, "input must be on TOS");
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_trunc()));
__ fist_s(Address(rsp, 0));
__ movl(dest->as_register(), Address(rsp, 0));
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
} // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
assert(op->stub() != NULL, "stub required");
__ cmpl(dest->as_register(), 0x80000000);
__ jcc(Assembler::equal, *op->stub()->entry());
__ bind(*op->stub()->continuation()); break;
case Bytecodes::_f2l: case Bytecodes::_d2l:
assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
assert(src->fpu() == 0, "input must be on TOS");
assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
// instruction sequence too long to inline it here
{
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
} break; #endif// _LP64
if (op->fast_check()) { // get object class // not a safepoint as obj null check happens earlier #ifdef _LP64 if (UseCompressedClassPointers) {
__ load_klass(Rtmp1, obj, tmp_load_klass);
__ cmpptr(k_RInfo, Rtmp1);
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
} #else if (k->is_loaded()) {
__ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
} else {
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
} #endif
__ jcc(Assembler::notEqual, *failure_target); // successful cast, fall through to profile or jump
} else { // get object class // not a safepoint as obj null check happens earlier
__ load_klass(klass_RInfo, obj, tmp_load_klass); if (k->is_loaded()) { // See if we get an immediate positive hit #ifdef _LP64
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset())); #else
__ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding()); #endif// _LP64 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *failure_target); // successful cast, fall through to profile or jump
} else { // See if we get an immediate positive hit
__ jcc(Assembler::equal, *success_target); // check for self #ifdef _LP64
__ cmpptr(klass_RInfo, k_RInfo); #else
__ cmpklass(klass_RInfo, k->constant_encoding()); #endif// _LP64
__ jcc(Assembler::equal, *success_target);
__ push(klass_RInfo); #ifdef _LP64
__ push(k_RInfo); #else
__ pushklass(k->constant_encoding(), noreg); #endif// _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(klass_RInfo); // result is a boolean
__ cmpl(klass_RInfo, 0);
__ jcc(Assembler::equal, *failure_target); // successful cast, fall through to profile or jump
}
} else { // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); // call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(k_RInfo); // result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *failure_target); // successful cast, fall through to profile or jump
}
} if (op->should_profile()) { Register mdo = klass_RInfo, recv = k_RInfo;
__ bind(profile_cast_success);
__ mov_metadata(mdo, md->constant_encoding());
__ load_klass(recv, obj, tmp_load_klass);
type_profile_helper(mdo, md, data, recv, success);
__ jmp(*success);
// get instance klass (it's already uncompressed)
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); // perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); // call out-of-line instance of __ check_klass_subtype_slow_path(...):
__ push(klass_RInfo);
__ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
__ pop(klass_RInfo);
__ pop(k_RInfo); // result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *failure_target); // fall through to the success case
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.