/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// At top of Java expression stack which may be different than esp(). It // isn't for category 1 objects. staticinline Address at_tos () { return Address(esp, Interpreter::expr_offset_in_bytes(0));
}
// Condition conversion static Assembler::Condition j_not(TemplateTable::Condition cc) { switch (cc) { case TemplateTable::equal : return Assembler::NE; case TemplateTable::not_equal : return Assembler::EQ; case TemplateTable::less : return Assembler::GE; case TemplateTable::less_equal : return Assembler::GT; case TemplateTable::greater : return Assembler::LE; case TemplateTable::greater_equal: return Assembler::LT;
}
ShouldNotReachHere(); return Assembler::EQ;
}
// Miscellaneous helper routines // Store an oop (or NULL) at the Address described by obj. // If val == noreg this means store a NULL staticvoid do_oop_store(InterpreterMacroAssembler* _masm,
Address dst, Register val,
DecoratorSet decorators) {
assert(val == noreg || val == r0, "parameter is just for looks");
__ store_heap_oop(dst, val, r10, r11, r3, decorators);
}
// Rewrite the bytecode at the current bcp into the faster variant `bc` so
// later executions dispatch to the quickened template.  bc_reg and temp_reg
// are scratch; when load_bc_into_bc_reg is true the new bytecode value is
// materialized into bc_reg here.
// NOTE(review): this span appears truncated — the final store of bc_reg back
// into the bytecode stream and the bind of L_patch_done are not visible here.
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movw(bc_reg, bc);
      // put_code still zero (unresolved) -> skip the patch entirely
      __ cbzw(temp_reg, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movw(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ load_unsigned_byte(temp_reg, at_bcp(0));
    __ cmpw(temp_reg, Bytecodes::_breakpoint);
    __ br(Assembler::NE, L_fast_patch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }
  // NOTE(review): interior fragment of the constant-pool tag dispatch (ldc
  // path).  The call_ldc and notClass labels, and the setup of r0/r1, live
  // outside this visible span.
  // get type
  __ add(r3, r1, tags_offset);
  __ lea(r3, Address(r0, r3));
  // load-acquire the tag byte so a concurrently published resolution is observed
  __ ldarb(r3, r3);

  // unresolved class - get the resolved class
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
  __ br(Assembler::EQ, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
  __ br(Assembler::EQ, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmp(r3, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::NE, notClass);
  // NOTE(review): interior fragment — the obj/off/flags registers and the
  // Done and notInt/... labels referenced below are defined outside this
  // visible span.
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ mov(off, flags);
  // extract the field offset from the packed flags word
  __ andw(off, off, ConstantPoolCacheEntry::field_index_mask);
  const Address field(obj, off);

  // What sort of thing are we loading?
  // x86 uses a shift and mask or wings it with a shift plus assert
  // the mask is not needed. aarch64 just uses bitfield extract
  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,
           ConstantPoolCacheEntry::tos_state_bits);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpw(flags, itos);
      __ br(Assembler::NE, notInt);
      // itos
      __ ldrw(r0, field);
      __ push(itos);
      __ b(Done);
// iload with optional quickening of frequent pairs:
// iload/iload -> _fast_iload2, iload/caload -> _fast_icaload, lone iload ->
// _fast_iload.
// NOTE(review): truncated — the binds of rewrite/done and the actual local
// load are not visible in this span.
void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload or a caload, and therefore
    // an iload pair.
    __ cmpw(r1, Bytecodes::_iload);
    __ br(Assembler::EQ, done);

    // if _fast_iload rewrite to _fast_iload2
    __ cmpw(r1, Bytecodes::_fast_iload);
    __ movw(bc, Bytecodes::_fast_iload2);
    __ br(Assembler::EQ, rewrite);

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);
// Load a float from the local at the index encoded in the bytecode and
// leave it in v0 (ftos).
void TemplateTable::fload()
{
  transition(vtos, ftos);
  locals_index(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}
// Wide-index variant of fload: same as fload but the local index is a
// two-byte operand (locals_index_wide).
void TemplateTable::wide_fload()
{
  transition(vtos, ftos);
  locals_index_wide(r1);
  // n.b. we use ldrd here because this is a 64 bit slot
  // this is comparable to the iload case
  __ ldrd(v0, faddress(r1));
}
// iload followed by caload frequent pair
// NOTE(review): truncated — only the index load from the local is visible;
// the char-array element access that follows is outside this span.
void TemplateTable::fast_icaload()
{
  transition(vtos, itos);

  // load index out of locals
  locals_index(r2);
  __ ldr(r1, iaddress(r2));
// NOTE(review): this span appears truncated — the bind of rewrite/done and
// the patch_bytecode call that the rewrite label targets are not visible.
// Fixes in the visible code: "constRegister" was a fused (undeclared)
// identifier, split into "const Register"; the redundant
// "Bytecodes::Bytecodes::_getfield" qualifier (legal via the injected class
// name, but confusing) is reduced to "Bytecodes::_getfield".
void TemplateTable::aload_0_internal(RewriteControl rc) {
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpw(r1, Bytecodes::_getfield);
    __ br(Assembler::EQ, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_igetfield);
    __ movw(bc, Bytecodes::_fast_iaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_agetfield);
    __ movw(bc, Bytecodes::_fast_aaccess_0);
    __ br(Assembler::EQ, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpw(r1, Bytecodes::_fast_fgetfield);
    __ movw(bc, Bytecodes::_fast_faccess_0);
    __ br(Assembler::EQ, rewrite);

    // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
    aload(0);
  }
// Store the int on top of stack (r0, itos) into the local at the index
// encoded in the bytecode.
void TemplateTable::istore()
{
  transition(itos, vtos);
  locals_index(r1);
  // FIXME: We're being very pernickerty here storing a jint in a
  // local with strw, which costs an extra instruction over what we'd
  // be able to do with a simple str. We should just store the whole
  // word.
  __ lea(rscratch1, iaddress(r1));
  __ strw(r0, Address(rscratch1));
}
  // NOTE(review): interior fragment of the aastore template — the is_null,
  // ok_is_subtype and done labels, the element_address, and the r0/r2/r3
  // register setup are established outside this visible span.
  // do array store check - check for NULL value first
  __ cbz(r0, is_null);

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register.  Frees r2.

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// Store a byte/boolean into an array.
// NOTE(review): truncated — the actual element store that follows the
// boolean-masking check is not visible in this span.
void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
  int diffbit_index = exact_log2(Klass::layout_helper_boolean_diffbit());
  Label L_skip;
  // test the layout-helper bit that distinguishes T_BOOLEAN from T_BYTE
  __ tbz(r2, diffbit_index, L_skip);
  __ andw(r0, r0, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
// Duplicate the top stack slot: ..., a -> ..., a, a
void TemplateTable::dup()
{
  transition(vtos, vtos);
  __ ldr(r0, Address(esp, 0));
  __ push(r0);
  // stack: ..., a, a
}
// Duplicate top slot and insert it below the next one:
// ..., a, b -> ..., b, a, b
void TemplateTable::dup_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos());     // load b
  __ ldr(r2, at_tos_p1());  // load a
  __ str(r0, at_tos_p1());  // store b
  __ str(r2, at_tos());     // store a
  __ push(r0);              // push b
  // stack: ..., b, a, b
}
// Duplicate top slot and insert it two slots down:
// ..., a, b, c -> ..., c, a, b, c
void TemplateTable::dup_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r0, at_tos());     // load c
  __ ldr(r2, at_tos_p2());  // load a
  __ str(r0, at_tos_p2());  // store c in a
  __ push(r0);              // push c
  // stack: ..., c, b, c, c
  __ ldr(r0, at_tos_p2());  // load b
  __ str(r2, at_tos_p2());  // store a in b
  // stack: ..., c, a, c, c
  __ str(r0, at_tos_p1());  // store b in c
  // stack: ..., c, a, b, c
}
// Duplicate the top two stack slots: ..., a, b -> ..., a, b, a, b
void TemplateTable::dup2()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r0, at_tos_p1());  // load a
  __ push(r0);              // push a
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  // stack: ..., a, b, a, b
}
// Duplicate the top two slots and insert them below the third:
// ..., a, b, c -> ..., b, c, a, b, c
void TemplateTable::dup2_x1()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ ldr(r2, at_tos());     // load c
  __ ldr(r0, at_tos_p1());  // load b
  __ push(r0);              // push b
  __ push(r2);              // push c
  // stack: ..., a, b, c, b, c
  __ str(r2, at_tos_p3());  // store c in b
  // stack: ..., a, c, c, b, c
  __ ldr(r2, at_tos_p4());  // load a
  __ str(r2, at_tos_p2());  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ str(r0, at_tos_p4());  // store b in a
  // stack: ..., b, c, a, b, c
}
// Duplicate the top two slots and insert them below the fourth:
// ..., a, b, c, d -> ..., c, d, a, b, c, d
void TemplateTable::dup2_x2()
{
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ ldr(r2, at_tos());     // load d
  __ ldr(r0, at_tos_p1());  // load c
  __ push(r0);              // push c
  __ push(r2);              // push d
  // stack: ..., a, b, c, d, c, d
  __ ldr(r0, at_tos_p4());  // load b
  __ str(r0, at_tos_p2());  // store b in d
  __ str(r2, at_tos_p4());  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ ldr(r2, at_tos_p5());  // load a
  __ ldr(r0, at_tos_p3());  // load c
  __ str(r2, at_tos_p3());  // store a in c
  __ str(r0, at_tos_p5());  // store c in a
  // stack: ..., c, d, a, b, c, d
}
// Swap the top two stack slots: ..., a, b -> ..., b, a
void TemplateTable::swap()
{
  transition(vtos, vtos);
  // stack: ..., a, b
  __ ldr(r2, at_tos_p1());  // load a
  __ ldr(r0, at_tos());     // load b
  __ str(r2, at_tos());     // store a in b
  __ str(r0, at_tos_p1());  // store b in a
  // stack: ..., b, a
}
// Binary int operation: pop the left operand into r1, the right operand is
// on tos in r0, and the 32-bit result replaces it in r0.
void TemplateTable::iop2(Operation op)
{
  transition(itos, itos);
  // r0 <== r1 op r0
  __ pop_i(r1);
  switch (op) {
  case add  : __ addw(r0, r1, r0);  break;
  case sub  : __ subw(r0, r1, r0);  break;
  case mul  : __ mulw(r0, r1, r0);  break;
  case _and : __ andw(r0, r1, r0);  break;
  case _or  : __ orrw(r0, r1, r0);  break;
  case _xor : __ eorw(r0, r1, r0);  break;
  case shl  : __ lslvw(r0, r1, r0); break;
  case shr  : __ asrvw(r0, r1, r0); break;
  case ushr : __ lsrvw(r0, r1, r0); break;
  default   : ShouldNotReachHere();
  }
}
// Binary long operation: pop the left operand into r1, the right operand is
// on tos in r0, and the 64-bit result replaces it in r0.
void TemplateTable::lop2(Operation op)
{
  transition(ltos, ltos);
  // r0 <== r1 op r0
  __ pop_l(r1);
  switch (op) {
  case add  : __ add(r0, r1, r0);  break;
  case sub  : __ sub(r0, r1, r0);  break;
  case mul  : __ mul(r0, r1, r0);  break;
  case _and : __ andr(r0, r1, r0); break;
  case _or  : __ orr(r0, r1, r0);  break;
  case _xor : __ eor(r0, r1, r0);  break;
  default   : ShouldNotReachHere();
  }
}
// Primitive conversion bytecodes (i2l, f2d, ...).
// NOTE(review): truncated — only the debug-build tos-state consistency check
// is visible; the actual conversion emission is outside this span.
void TemplateTable::convert()
{
  // Checking
#ifdef ASSERT
  {
    // Derive the expected incoming and outgoing tos states from the bytecode
    // and verify them against the template's declared transition.
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT
  // static const int64_t is_nan = 0x8000000000000000L;
// lcmp: compare two longs (left popped into r1, right on tos in r0) and
// leave -1/0/1 in r0 for less-than/equal/greater-than.
void TemplateTable::lcmp()
{
  transition(ltos, itos);
  Label done;
  __ pop_l(r1);
  __ cmp(r1, r0);
  __ mov(r0, (uint64_t)-1L);
  __ br(Assembler::LT, done);
  // __ mov(r0, 1UL);
  // __ csel(r0, r0, zr, Assembler::NE);
  // and here is a faster way
  // csinc: r0 = (EQ ? 0 : zr + 1), i.e. 0 when equal, 1 when greater
  __ csinc(r0, zr, zr, Assembler::EQ);
  __ bind(done);
}
// fcmp/dcmp: compare two floats or doubles (left popped into v1, right on
// tos in v0) and leave -1/0/1 in r0.  unordered_result selects whether an
// unordered (NaN) comparison produces -1 (fcmpl/dcmpl) or 1 (fcmpg/dcmpg).
void TemplateTable::float_cmp(bool is_float, int unordered_result)
{
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(v1);
    __ fcmps(v1, v0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(v1);
    __ fcmpd(v1, v0);
  }
  if (unordered_result < 0) {
    // we want -1 for unordered or less than, 0 for equal and 1 for
    // greater than.
    __ mov(r0, (uint64_t)-1L);
    // for FP LT tests less than or unordered
    __ br(Assembler::LT, done);
    // install 0 for EQ otherwise 1
    __ csinc(r0, zr, zr, Assembler::EQ);
  } else {
    // we want -1 for less than, 0 for equal and 1 for unordered or
    // greater than.
    __ mov(r0, 1L);
    // for FP HI tests greater than or unordered
    __ br(Assembler::HI, done);
    // install 0 for EQ otherwise ~0 (csinv yields NOT(zr) = -1)
    __ csinv(r0, zr, zr, Assembler::EQ);
  }
  __ bind(done);
}
// Emit the shared code for conditional/unconditional branches and jsr.
// NOTE(review): this span is truncated and spliced — the code between the
// normal-branch bcp adjustment and the OSR-result check (backedge counting,
// the dispatch label, the call producing the osr nmethod in r0) is missing,
// and the OSR migration code after the in_use check is cut off.
void TemplateTable::branch(bool is_jsr, bool is_wide)
{
  // We might be moving to a safepoint.  The thread which calls
  // Interpreter::notice_safepoints() will effectively flush its cache
  // when it makes a system call, but we need to do something to
  // ensure that we see the changed dispatch table.
  __ membar(MacroAssembler::LoadLoad);

  // load branch displacement
  if (!is_wide) {
    __ ldrh(r2, at_bcp(1));
    __ rev16(r2, r2);
    // sign extend the 16 bit value in r2
    __ sbfm(r2, r2, 0, 15);
  } else {
    __ ldrw(r2, at_bcp(1));
    __ revw(r2, r2);
    // sign extend the 32 bit value in r2
    __ sbfm(r2, r2, 0, 31);
  }

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rscratch1
    __ load_unsigned_byte(rscratch1, Address(rbcp, r2));
    // compute return address as bci
    __ ldr(rscratch2, Address(rmethod, Method::const_offset()));
    __ add(rscratch2, rscratch2,
           in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
    __ sub(r1, rbcp, rscratch2);
    __ push_i(r1);
    // Adjust the bcp by the 16-bit displacement in r2
    __ add(rbcp, rbcp, r2);
    __ dispatch_only(vtos, /*generate_poll*/true);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp by the displacement in r2
  __ add(rbcp, rbcp, r2);

  // r0: osr nmethod (osr ok) or NULL (osr not possible)
  // w1: target bytecode
  // r2: scratch
  __ cbz(r0, dispatch);     // test result -- no osr if null
  // nmethod may have been invalidated (VM may block upon call_VM return)
  __ ldrb(r2, Address(r0, nmethod::state_offset()));
  if (nmethod::in_use != 0)
    __ sub(r2, r2, nmethod::in_use);
  __ cbnz(r2, dispatch);

  // We have the address of an on stack replacement routine in r0
  // We need to prepare to execute the OSR method. First we must
  // migrate the locals and monitors off of the stack.
// if_icmp<cond>: pop the left int into r1, compare against tos (r0), and
// branch on the negated condition around the taken path.
void TemplateTable::if_icmp(Condition cc)
{
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(r1);
  __ cmpw(r1, r0, Assembler::LSL);
  __ br(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}
// ifnull/ifnonnull: test the reference on tos (r0) against null using a
// compare-and-branch on the negated condition.
void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}
// if_acmp<cond>: pop the left reference into r1, compare oops against tos
// (r0), and branch on the negated condition around the taken path.
void TemplateTable::if_acmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(r1);
  __ cmpoop(r1, r0);
  __ br(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}
// ret bytecode (return from jsr subroutine).
// NOTE(review): truncated — only the safepoint membar is visible; the
// return-address load and dispatch are outside this span.
void TemplateTable::ret() {
  transition(vtos, vtos);
  // We might be moving to a safepoint.  The thread which calls
  // Interpreter::notice_safepoints() will effectively flush its cache
  // when it makes a system call, but we need to do something to
  // ensure that we see the changed dispatch table.
  __ membar(MacroAssembler::LoadLoad);
// lookupswitch should never reach the interpreter unquickened —
// presumably it is rewritten to a fast switch variant before dispatch
// (see fast_linearswitch/fast_binaryswitch below); the stop guards
// against that invariant being violated.
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
// Quickened lookupswitch: linear scan of the (match, offset) pairs in the
// bytecode stream.  Key is on tos in r0; on a match the matching pair's
// offset is added to rbcp, otherwise the default offset is used.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap r0 so we can avoid bswapping the table entries
  __ rev32(r0, r0);
  // align rbcp
  __ lea(r19, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andr(r19, r19, -BytesPerInt);
  // set counter
  __ ldrw(r1, Address(r19, BytesPerInt));
  __ rev32(r1, r1);
  __ b(loop_entry);
  // table search
  __ bind(loop);
  // each pair is 8 bytes, hence the lsl(3) scaling of the index
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(rscratch1, Address(rscratch1, 2 * BytesPerInt));
  __ cmpw(r0, rscratch1);
  __ br(Assembler::EQ, found);
  __ bind(loop_entry);
  __ subs(r1, r1, 1);
  __ br(Assembler::PL, loop);
  // default case
  __ profile_switch_default(r0);
  __ ldrw(r3, Address(r19, 0));
  __ b(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ lea(rscratch1, Address(r19, r1, Address::lsl(3)));
  __ ldrw(r3, Address(rscratch1, 3 * BytesPerInt));
  __ profile_switch_case(r1, r0, r19);
  // continue execution
  __ bind(continue_execution);
  // table entries are big-endian; convert offset before applying it
  __ rev32(r3, r3);
  __ add(rbcp, rbcp, r3, ext::sxtw);
  __ ldrb(rscratch1, Address(rbcp, 0));
  __ dispatch_only(vtos, /*generate_poll*/true);
}
// Quickened lookupswitch: binary search over the sorted (match, offset)
// pairs in the bytecode stream.
// NOTE(review): truncated — the register aliases key/array/i/j/h/temp are
// declared in a part of this function that is not visible here, and the
// default_case handling after the final compare is cut off.
void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andr(array, array, -BytesPerInt);

  // Initialize i & j
  __ mov(i, 0);                            // i = 0;
  __ ldrw(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ rev32(j, j);

  // And start
  Label entry;
  __ b(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ addw(h, i, j);                          // h = i + j;
    __ lsrw(h, h, 1);                          // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ ldr(temp, Address(array, h, Address::lsl(3)));
    __ rev32(temp, temp);
    __ cmpw(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ csel(j, h, j, Assembler::LT);
    // i = h if (key >= array[h].fast_match())
    __ csel(i, h, i, Assembler::GE);
    // while (i+1 < j)
    __ bind(entry);
    __ addw(h, i, 1);          // i+1
    __ cmpw(h, j);             // i+1 < j
    __ br(Assembler::LT, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ ldr(temp, Address(array, i, Address::lsl(3)));
  __ rev32(temp, temp);
  __ cmpw(key, temp);
  __ br(Assembler::NE, default_case);
  // NOTE(review): fragment of a different template (the method-return path;
  // _desc and state are established outside this visible span), spliced
  // after the binary-search code above.
  // Issue a StoreStore barrier after all stores but before return
  // from any constructor for any class with a final field.  We don't
  // know if this is a finalizer, so we always do so.
  if (_desc->bytecode() == Bytecodes::_return)
    __ membar(MacroAssembler::StoreStore);

  // Narrow result if state is itos but result type is smaller.
  // Need to narrow in the return bytecode rather than in generate_return_entry
  // since compiled code callers expect the result to already be narrowed.
  if (state == itos) {
    __ narrow(r0);
  }

  __ remove_activation(state);
  __ ret(lr);
}
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's
// in order.  Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.  ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read.  It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similar a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write.  It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs).  Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads.  These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case.  This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.

// NOTE(review): interior fragment of a resolve-from-cache helper — the
// enclosing function signature and the declarations of Rcache, index, temp,
// byte_no, code, index_size and the resolved/clinit_barrier_slow labels are
// not visible in this span.
  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
  __ br(Assembler::EQ, resolved);

  // resolve first time through
  // Class initialization barrier slow path lands here as well.
  __ bind(clinit_barrier_slow);
  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
  __ mov(temp, (int) code);
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  // n.b. unlike x86 Rcache is now rcpool plus the indexed offset
  // so all clients of this method must be modified accordingly
  __ bind(resolved);

  // Class initialization barrier for static methods
  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
    __ load_resolved_method_at_index(byte_no, temp, Rcache);
    __ load_method_holder(temp, temp);
    __ clinit_barrier(temp, rscratch1, NULL, &clinit_barrier_slow);
  }
}
// The Rcache and index registers must be set before call
// n.b unlike x86 cache already includes the index offset
// NOTE(review): truncated — only the register-distinctness assertion of this
// function is visible in this span.
void TemplateTable::load_field_cp_cache_entry(Register obj, Register cache, Register index, Register off, Register flags, bool is_static = false) {
  assert_different_registers(cache, index, flags, off);
// NOTE(review): The following German disclaimer text is web-scrape residue,
// not source code; it is preserved here as a comment so the file stays
// syntactically valid.  Translation: "The information on this website was
// carefully compiled to the best of our knowledge.  However, neither
// completeness, nor correctness, nor quality of the provided information is
// guaranteed.  Note: the colored syntax display is still experimental."
//
//   Die Informationen auf dieser Webseite wurden
//   nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
//   noch Qualität der bereit gestellten Informationen zugesichert.
//   Bemerkung:
//   Die farbliche Syntaxdarstellung ist noch experimentell.