/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// At top of Java expression stack which may be different than esp(). It // isn't for category 1 objects. staticinline Address at_tos () { return Address(esp, Interpreter::expr_offset_in_bytes(0));
}
// Miscellaneous helper routines // Store an oop (or NULL) at the Address described by obj. // If val == noreg this means store a NULL staticvoid do_oop_store(InterpreterMacroAssembler* _masm,
Address dst, Register val,
DecoratorSet decorators) {
assert(val == noreg || val == x10, "parameter is just for looks");
__ store_heap_oop(dst, val, x28, x29, x13, decorators);
}
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, Register temp_reg, bool load_bc_into_bc_reg /*=true*/, int byte_no) { if (!RewriteBytecodes) { return; }
Label L_patch_done;
switch (bc) { case Bytecodes::_fast_aputfield: // fall through case Bytecodes::_fast_bputfield: // fall through case Bytecodes::_fast_zputfield: // fall through case Bytecodes::_fast_cputfield: // fall through case Bytecodes::_fast_dputfield: // fall through case Bytecodes::_fast_fputfield: // fall through case Bytecodes::_fast_iputfield: // fall through case Bytecodes::_fast_lputfield: // fall through case Bytecodes::_fast_sputfield: { // We skip bytecode quickening for putfield instructions when // the put_code written to the constant pool cache is zero. // This is required so that every execution of this instruction // calls out to InterpreterRuntime::resolve_get_put to do // additional, required work.
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(load_bc_into_bc_reg, "we use bc_reg as temp");
__ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
__ mv(bc_reg, bc);
__ beqz(temp_reg, L_patch_done); break;
} default:
assert(byte_no == -1, "sanity"); // the pair bytecodes have already done the load. if (load_bc_into_bc_reg) {
__ mv(bc_reg, bc);
}
}
if (JvmtiExport::can_post_breakpoint()) {
Label L_fast_patch; // if a breakpoint is present we can't rewrite the stream directly
__ load_unsigned_byte(temp_reg, at_bcp(0));
__ addi(temp_reg, temp_reg, -Bytecodes::_breakpoint); // temp_reg is temporary register.
__ bnez(temp_reg, L_fast_patch); // Let breakpoint table handling rewrite to quicker bytecode
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), xmethod, xbcp, bc_reg);
__ j(L_patch_done);
__ bind(L_fast_patch);
}
// unresolved class - get the resolved class
__ mv(t1, (u1)JVM_CONSTANT_UnresolvedClass);
__ beq(x13, t1, call_ldc);
// unresolved class in error state - call into runtime to throw the error // from the first resolution attempt
__ mv(t1, (u1)JVM_CONSTANT_UnresolvedClassInError);
__ beq(x13, t1, call_ldc);
// resolved class - need to call vm to get java mirror of the class
__ mv(t1, (u1)JVM_CONSTANT_Class);
__ bne(x13, t1, notClass);
// VMr = obj = base address to find primitive value to push // VMr2 = flags = (tos, off) using format of CPCE::_flags
__ mv(off, flags);
__ mv(t0, ConstantPoolCacheEntry::field_index_mask);
__ andrw(off, off, t0);
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos); if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done; constRegister bc = x14;
// get next bytecode
__ load_unsigned_byte(x11, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
// if _iload, wait to rewrite to iload2. We only want to rewrite the // last two iloads in a pair. Comparing against fast_iload means that // the next bytecode is neither an iload or a caload, and therefore // an iload pair.
__ mv(t1, Bytecodes::_iload);
__ beq(x11, t1, done);
// if _fast_iload rewrite to _fast_iload2
__ mv(t1, Bytecodes::_fast_iload);
__ mv(bc, Bytecodes::_fast_iload2);
__ beq(x11, t1, rewrite);
// if _caload rewrite to _fast_icaload
__ mv(t1, Bytecodes::_caload);
__ mv(bc, Bytecodes::_fast_icaload);
__ beq(x11, t1, rewrite);
// else rewrite to _fast_iload
__ mv(bc, Bytecodes::_fast_iload);
void TemplateTable::aload_0_internal(RewriteControl rc) { // According to bytecode histograms, the pairs: // // _aload_0, _fast_igetfield // _aload_0, _fast_agetfield // _aload_0, _fast_fgetfield // // occur frequently. If RewriteFrequentPairs is set, the (slow) // _aload_0 bytecode checks if the next bytecode is either // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then // rewrites the current bytecode into a pair bytecode; otherwise it // rewrites the current bytecode into _fast_aload_0 that doesn't do // the pair check anymore. // // Note: If the next bytecode is _getfield, the rewrite must be // delayed, otherwise we may miss an opportunity for a pair. // // Also rewrite frequent pairs // aload_0, aload_1 // aload_0, iload_1 // These bytecodes with a small amount of code are most profitable // to rewrite if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done; constRegister bc = x14;
// get next bytecode
__ load_unsigned_byte(x11, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
// if _getfield then wait with rewrite
__ mv(t1, Bytecodes::Bytecodes::_getfield);
__ beq(x11, t1, done);
// if _igetfield then rewrite to _fast_iaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ mv(t1, Bytecodes::_fast_igetfield);
__ mv(bc, Bytecodes::_fast_iaccess_0);
__ beq(x11, t1, rewrite);
// if _agetfield then rewrite to _fast_aaccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ mv(t1, Bytecodes::_fast_agetfield);
__ mv(bc, Bytecodes::_fast_aaccess_0);
__ beq(x11, t1, rewrite);
// if _fgetfield then rewrite to _fast_faccess_0
assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
__ mv(t1, Bytecodes::_fast_fgetfield);
__ mv(bc, Bytecodes::_fast_faccess_0);
__ beq(x11, t1, rewrite);
// do array store check - check for NULL value first
__ beqz(x10, is_null);
// Move subklass into x11
__ load_klass(x11, x10); // Move superklass into x10
__ load_klass(x10, x13);
__ ld(x10, Address(x10,
ObjArrayKlass::element_klass_offset())); // Compress array + index * oopSize + 12 into a single register. Frees x12.
// Generate subtype check. Blows x12, x15 // Superklass in x10. Subklass in x11.
__ gen_subtype_check(x11, ok_is_subtype);
// Come here on failure // object is at TOS
__ j(Interpreter::_throw_ArrayStoreException_entry);
// Come here on success
__ bind(ok_is_subtype);
// Get the value we will store
__ ld(x10, at_tos()); // Now store using the appropriate barrier
do_oop_store(_masm, element_address, x10, IS_ARRAY);
__ j(done);
// Have a NULL in x10, x13=array, x12=index. Store NULL at ary[idx]
__ bind(is_null);
__ profile_null_seen(x12);
// Store a NULL
do_oop_store(_masm, element_address, noreg, IS_ARRAY);
// bastore: store the int on tos into a byte/boolean array element.
// NOTE(review): this chunk appears truncated — the actual element store and
// closing brace are not visible here; code below is annotated as-is.
void TemplateTable::bastore() {
transition(itos, vtos);
// Pop index and array reference; the value stays in x10 (itos).
__ pop_i(x11);
__ pop_ptr(x13); // x10: value // x11: index // x13: array
// Throws ArrayIndexOutOfBoundsException on a bad index.
index_check(x13, x11); // prefer index in x11
// Need to check whether array is boolean or byte // since both types share the bastore bytecode.
__ load_klass(x12, x13);
// layout_helper encodes the element type; the diffbit distinguishes T_BOOLEAN.
__ lwu(x12, Address(x12, Klass::layout_helper_offset()));
Label L_skip;
__ andi(t0, x12, Klass::layout_helper_boolean_diffbit());
__ beqz(t0, L_skip);
__ andi(x10, x10, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
__ bind(L_skip);
// dup: duplicate the top (category 1) expression-stack slot.
void TemplateTable::dup() {
transition(vtos, vtos);
// Reload tos value and push a second copy of it.
__ ld(x10, Address(esp, 0));
__ push_reg(x10); // stack: ..., a, a
}
// dup_x1: duplicate the top slot and insert the copy two slots down
// (..., a, b -> ..., b, a, b).
void TemplateTable::dup_x1() {
transition(vtos, vtos); // stack: ..., a, b
__ ld(x10, at_tos()); // load b
__ ld(x12, at_tos_p1()); // load a
// Swap a and b in place, then push another b on top.
__ sd(x10, at_tos_p1()); // store b
__ sd(x12, at_tos()); // store a
__ push_reg(x10); // push b // stack: ..., b, a, b
}
// dup_x2: duplicate the top slot and insert the copy three slots down
// (..., a, b, c -> ..., c, a, b, c).
void TemplateTable::dup_x2() {
transition(vtos, vtos); // stack: ..., a, b, c
__ ld(x10, at_tos()); // load c
__ ld(x12, at_tos_p2()); // load a
__ sd(x10, at_tos_p2()); // store c in a
__ push_reg(x10); // push c // stack: ..., c, b, c, c
// Now rotate the middle two slots into place.
__ ld(x10, at_tos_p2()); // load b
__ sd(x12, at_tos_p2()); // store a in b // stack: ..., c, a, c, c
__ sd(x10, at_tos_p1()); // store b in c // stack: ..., c, a, b, c
}
// dup2: duplicate the top two slots (..., a, b -> ..., a, b, a, b).
void TemplateTable::dup2() {
transition(vtos, vtos); // stack: ..., a, b
__ ld(x10, at_tos_p1()); // load a
__ push_reg(x10); // push a
// After pushing a, the original b is again one slot below the new top.
__ ld(x10, at_tos_p1()); // load b
__ push_reg(x10); // push b // stack: ..., a, b, a, b
}
// dup2_x1: duplicate the top two slots and insert the copies three slots down
// (..., a, b, c -> ..., b, c, a, b, c).
void TemplateTable::dup2_x1() {
transition(vtos, vtos); // stack: ..., a, b, c
__ ld(x12, at_tos()); // load c
__ ld(x10, at_tos_p1()); // load b
__ push_reg(x10); // push b
__ push_reg(x12); // push c // stack: ..., a, b, c, b, c
// Rewrite the lower three slots using the values held in x10 (b) and x12 (c).
__ sd(x12, at_tos_p3()); // store c in b // stack: ..., a, c, c, b, c
__ ld(x12, at_tos_p4()); // load a
__ sd(x12, at_tos_p2()); // store a in 2nd c // stack: ..., a, c, a, b, c
__ sd(x10, at_tos_p4()); // store b in a // stack: ..., b, c, a, b, c
}
// dup2_x2: duplicate the top two slots and insert the copies four slots down
// (..., a, b, c, d -> ..., c, d, a, b, c, d).
void TemplateTable::dup2_x2() {
transition(vtos, vtos); // stack: ..., a, b, c, d
__ ld(x12, at_tos()); // load d
__ ld(x10, at_tos_p1()); // load c
__ push_reg(x10); // push c
__ push_reg(x12); // push d // stack: ..., a, b, c, d, c, d
// Exchange b and d in the lower part of the stack.
__ ld(x10, at_tos_p4()); // load b
__ sd(x10, at_tos_p2()); // store b in d
__ sd(x12, at_tos_p4()); // store d in b // stack: ..., a, d, c, b, c, d
// Exchange a and c.
__ ld(x12, at_tos_p5()); // load a
__ ld(x10, at_tos_p3()); // load c
__ sd(x12, at_tos_p3()); // store a in c
__ sd(x10, at_tos_p5()); // store c in a // stack: ..., c, d, a, b, c, d
}
// swap: exchange the top two (category 1) expression-stack slots.
void TemplateTable::swap() {
transition(vtos, vtos); // stack: ..., a, b
__ ld(x12, at_tos_p1()); // load a
__ ld(x10, at_tos()); // load b
__ sd(x12, at_tos()); // store a in b
__ sd(x10, at_tos_p1()); // store b in a // stack: ..., b, a
}
// Binary int operations: pop the next-to-top value into x11 and combine it
// with the tos value, leaving the 32-bit result in x10 (x10 <== x11 op x10).
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(x11);
  switch (op) {
    case add  : __ addw(x10, x11, x10);  break;
    case sub  : __ subw(x10, x11, x10);  break;
    case mul  : __ mulw(x10, x11, x10);  break;
    case _and : __ andrw(x10, x11, x10); break;
    case _or  : __ orrw(x10, x11, x10);  break;
    case _xor : __ xorrw(x10, x11, x10); break;
    case shl  : __ sllw(x10, x11, x10);  break; // shift amount in x10
    case shr  : __ sraw(x10, x11, x10);  break; // arithmetic shift
    case ushr : __ srlw(x10, x11, x10);  break; // logical shift
    default   : ShouldNotReachHere();
  }
}
// Binary long operations: pop the next-to-top value into x11 and combine it
// with the tos value, leaving the 64-bit result in x10 (x10 <== x11 op x10).
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(x11);
  switch (op) {
    case add  : __ add(x10, x11, x10);  break;
    case sub  : __ sub(x10, x11, x10);  break;
    case mul  : __ mul(x10, x11, x10);  break;
    case _and : __ andr(x10, x11, x10); break;
    case _or  : __ orr(x10, x11, x10);  break;
    case _xor : __ xorr(x10, x11, x10); break;
    default   : ShouldNotReachHere();
  }
}
void TemplateTable::convert() { // Checking #ifdef ASSERT
{
TosState tos_in = ilgl;
TosState tos_out = ilgl; switch (bytecode()) { case Bytecodes::_i2l: // fall through case Bytecodes::_i2f: // fall through case Bytecodes::_i2d: // fall through case Bytecodes::_i2b: // fall through case Bytecodes::_i2c: // fall through case Bytecodes::_i2s: tos_in = itos; break; case Bytecodes::_l2i: // fall through case Bytecodes::_l2f: // fall through case Bytecodes::_l2d: tos_in = ltos; break; case Bytecodes::_f2i: // fall through case Bytecodes::_f2l: // fall through case Bytecodes::_f2d: tos_in = ftos; break; case Bytecodes::_d2i: // fall through case Bytecodes::_d2l: // fall through case Bytecodes::_d2f: tos_in = dtos; break; default : ShouldNotReachHere();
} switch (bytecode()) { case Bytecodes::_l2i: // fall through case Bytecodes::_f2i: // fall through case Bytecodes::_d2i: // fall through case Bytecodes::_i2b: // fall through case Bytecodes::_i2c: // fall through case Bytecodes::_i2s: tos_out = itos; break; case Bytecodes::_i2l: // fall through case Bytecodes::_f2l: // fall through case Bytecodes::_d2l: tos_out = ltos; break; case Bytecodes::_i2f: // fall through case Bytecodes::_l2f: // fall through case Bytecodes::_d2f: tos_out = ftos; break; case Bytecodes::_i2d: // fall through case Bytecodes::_l2d: // fall through case Bytecodes::_f2d: tos_out = dtos; break; default : ShouldNotReachHere();
}
transition(tos_in, tos_out);
} #endif// ASSERT
// Conversion switch (bytecode()) { case Bytecodes::_i2l:
__ sign_extend(x10, x10, 32); break; case Bytecodes::_i2f:
__ fcvt_s_w(f10, x10); break; case Bytecodes::_i2d:
__ fcvt_d_w(f10, x10); break; case Bytecodes::_i2b:
__ sign_extend(x10, x10, 8); break; case Bytecodes::_i2c:
__ zero_extend(x10, x10, 16); break; case Bytecodes::_i2s:
__ sign_extend(x10, x10, 16); break; case Bytecodes::_l2i:
__ addw(x10, x10, zr); break; case Bytecodes::_l2f:
__ fcvt_s_l(f10, x10); break; case Bytecodes::_l2d:
__ fcvt_d_l(f10, x10); break; case Bytecodes::_f2i:
__ fcvt_w_s_safe(x10, f10); break; case Bytecodes::_f2l:
__ fcvt_l_s_safe(x10, f10); break; case Bytecodes::_f2d:
__ fcvt_d_s(f10, f10); break; case Bytecodes::_d2i:
__ fcvt_w_d_safe(x10, f10); break; case Bytecodes::_d2l:
__ fcvt_l_d_safe(x10, f10); break; case Bytecodes::_d2f:
__ fcvt_s_d(f10, f10); break; default:
ShouldNotReachHere();
}
}
void TemplateTable::float_cmp(bool is_float, int unordered_result) { // For instruction feq, flt and fle, the result is 0 if either operand is NaN if (is_float) {
__ pop_f(f11); // if unordered_result < 0: // we want -1 for unordered or less than, 0 for equal and 1 for // greater than. // else: // we want -1 for less than, 0 for equal and 1 for unordered or // greater than. // f11 primary, f10 secondary
__ float_compare(x10, f11, f10, unordered_result);
} else {
__ pop_d(f11); // if unordered_result < 0: // we want -1 for unordered or less than, 0 for equal and 1 for // greater than. // else: // we want -1 for less than, 0 for equal and 1 for unordered or // greater than. // f11 primary, f10 secondary
__ double_compare(x10, f11, f10, unordered_result);
}
}
void TemplateTable::branch(bool is_jsr, bool is_wide) { // We might be moving to a safepoint. The thread which calls // Interpreter::notice_safepoints() will effectively flush its cache // when it makes a system call, but we need to do something to // ensure that we see the changed dispatch table.
__ membar(MacroAssembler::LoadLoad);
// load branch displacement if (!is_wide) {
__ lhu(x12, at_bcp(1));
__ revb_h_h(x12, x12); // reverse bytes in half-word and sign-extend
} else {
__ lwu(x12, at_bcp(1));
__ revb_w_w(x12, x12); // reverse bytes in word and sign-extend
}
// Handle all the JSR stuff here, then exit. // It's much shorter and cleaner than intermingling with the non-JSR // normal-branch stuff occurring below.
if (is_jsr) { // compute return address as bci
__ ld(t1, Address(xmethod, Method::const_offset()));
__ add(t1, t1,
in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3));
__ sub(x11, xbcp, t1);
__ push_i(x11); // Adjust the bcp by the 16-bit displacement in x12
__ add(xbcp, xbcp, x12);
__ load_unsigned_byte(t0, Address(xbcp, 0)); // load the next target bytecode into t0, it is the argument of dispatch_only
__ dispatch_only(vtos, /*generate_poll*/true); return;
}
// Normal (non-jsr) branch handling
// Adjust the bcp by the displacement in x12
__ add(xbcp, xbcp, x12);
// x10: osr nmethod (osr ok) or NULL (osr not possible) // w11: target bytecode // x12: temporary
__ beqz(x10, dispatch); // test result -- no osr if null // nmethod may have been invalidated (VM may block upon call_VM return)
__ lbu(x12, Address(x10, nmethod::state_offset())); if (nmethod::in_use != 0) {
__ sub(x12, x12, nmethod::in_use);
}
__ bnez(x12, dispatch);
// We have the address of an on stack replacement routine in x10 // We need to prepare to execute the OSR method. First we must // migrate the locals and monitors off of the stack.
// and begin the OSR nmethod
__ ld(t0, Address(x9, nmethod::osr_entry_point_offset()));
__ jr(t0);
}
}
// if_0cmp: conditional branch comparing the int on tos against zero
// (ifeq/ifne/iflt/ifle/ifgt/ifge). The emitted test is inverted: it
// branches to not_taken when the condition fails.
// NOTE(review): this chunk appears truncated — the branch()/bind(not_taken)
// tail and closing brace are not visible here.
void TemplateTable::if_0cmp(Condition cc) {
transition(itos, vtos); // assume branch is more often taken than not (loops use backward branches)
Label not_taken;
// addw with zr normalizes x10 to its sign-extended 32-bit value before testing.
__ addw(x10, x10, zr); switch (cc) { case equal:
__ bnez(x10, not_taken); break; case not_equal:
__ beqz(x10, not_taken); break; case less:
__ bgez(x10, not_taken); break; case less_equal:
__ bgtz(x10, not_taken); break; case greater:
__ blez(x10, not_taken); break; case greater_equal:
__ bltz(x10, not_taken); break; default: break;
}
// if_nullcmp: implements ifnull / ifnonnull on the reference in x10 (atos).
void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // Assume the branch is more often taken than not (loops use backward
  // branches), so emit the inverted test and fall through into branch().
  Label L_not_taken;
  if (cc == equal) {
    // ifnull: skip the branch when the reference is non-null.
    __ bnez(x10, L_not_taken);
  } else {
    // ifnonnull: skip the branch when the reference is null.
    __ beqz(x10, L_not_taken);
  }
  branch(false, false);
  __ bind(L_not_taken);
  __ profile_not_taken_branch(x10);
}
void TemplateTable::if_acmp(Condition cc) {
transition(atos, vtos); // assume branch is more often taken than not (loops use backward branches)
Label not_taken;
__ pop_ptr(x11);
void TemplateTable::ret() {
transition(vtos, vtos); // We might be moving to a safepoint. The thread which calls // Interpreter::notice_safepoints() will effectively flush its cache // when it makes a system call, but we need to do something to // ensure that we see the changed dispatch table.
__ membar(MacroAssembler::LoadLoad);
// tableswitch: dispatch on the int in x10 through the bytecode's jump table.
// The table holds big-endian 32-bit values (default offset, lo, hi, then
// (hi - lo + 1) branch offsets), so each load is byte-reversed.
void TemplateTable::tableswitch() {
Label default_case, continue_execution;
transition(itos, vtos); // align xbcp
// x11 = 4-byte-aligned address of the table, just past the opcode.
__ la(x11, at_bcp(BytesPerInt));
__ andi(x11, x11, -BytesPerInt); // load lo & hi
__ lwu(x12, Address(x11, BytesPerInt));
__ lwu(x13, Address(x11, 2 * BytesPerInt));
__ revb_w_w(x12, x12); // reverse bytes in word (32bit) and sign-extend
__ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend // check against lo & hi
__ blt(x10, x12, default_case);
__ bgt(x10, x13, default_case); // lookup dispatch offset
// Index into the offset array: entry (x10 - lo) at table base + 3 words.
__ subw(x10, x10, x12);
__ shadd(x13, x10, x11, t0, 2);
__ lwu(x13, Address(x13, 3 * BytesPerInt));
__ profile_switch_case(x10, x11, x12); // continue execution
__ bind(continue_execution);
__ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend
// Advance bcp by the selected offset and dispatch the target bytecode.
__ add(xbcp, xbcp, x13);
__ load_unsigned_byte(t0, Address(xbcp));
__ dispatch_only(vtos, /*generate_poll*/true); // handle default
__ bind(default_case);
__ profile_switch_default(x10);
// Default offset is the first word of the table.
__ lwu(x13, Address(x11, 0));
__ j(continue_execution);
}
// lookupswitch is always rewritten at link time (presumably to
// fast_linearswitch / fast_binaryswitch, see below), so executing the
// original template is a fatal error.
void TemplateTable::lookupswitch() {
transition(itos, itos);
__ stop("lookupswitch bytecode should have been rewritten");
}
// fast_linearswitch: rewritten form of lookupswitch that scans the
// (match, offset) pair table linearly. Table values are big-endian, so the
// key in x10 is byte-reversed once instead of reversing every table entry.
void TemplateTable::fast_linearswitch() {
transition(itos, vtos);
Label loop_entry, loop, found, continue_execution; // bswap x10 so we can avoid bswapping the table entries
__ revb_w_w(x10, x10); // reverse bytes in word (32bit) and sign-extend // align xbcp
__ la(x9, at_bcp(BytesPerInt)); // btw: should be able to get rid of // this instruction (change offsets // below)
__ andi(x9, x9, -BytesPerInt); // set counter
// x11 = number of pairs (npairs word of the table).
__ lwu(x11, Address(x9, BytesPerInt));
__ revb_w(x11, x11);
__ j(loop_entry); // table search
// Loop body: compare the key against pair x11's match word (pairs are
// 8 bytes each, starting 2 words into the table).
__ bind(loop);
__ shadd(t0, x11, x9, t0, 3);
__ lw(t0, Address(t0, 2 * BytesPerInt));
__ beq(x10, t0, found);
__ bind(loop_entry);
__ addi(x11, x11, -1);
__ bgez(x11, loop); // default case
__ profile_switch_default(x10);
// Default offset is the first word of the table.
__ lwu(x13, Address(x9, 0));
__ j(continue_execution); // entry found -> get offset
__ bind(found);
__ shadd(t0, x11, x9, t0, 3);
__ lwu(x13, Address(t0, 3 * BytesPerInt));
__ profile_switch_case(x11, x10, x9); // continue execution
__ bind(continue_execution);
__ revb_w_w(x13, x13); // reverse bytes in word (32bit) and sign-extend
// Advance bcp by the selected offset and dispatch the target bytecode.
__ add(xbcp, xbcp, x13);
__ lbu(t0, Address(xbcp, 0));
__ dispatch_only(vtos, /*generate_poll*/true);
}
void TemplateTable::fast_binaryswitch() {
transition(itos, vtos); // Implementation using the following core algorithm: // // int binary_search(int key, LookupswitchPair* array, int n) // binary_search start: // #Binary search according to "Methodik des Programmierens" by // # Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. // int i = 0; // int j = n; // while (i + 1 < j) do // # invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) // # with Q: for all i: 0 <= i < n: key < a[i] // # where a stands for the array and assuming that the (inexisting) // # element a[n] is infinitely big. // int h = (i + j) >> 1 // # i < h < j // if (key < array[h].fast_match()) // then [j = h] // else [i = h] // end // # R: a[i] <= key < a[i+1] or Q // # (i.e., if key is within array, i is the correct index) // return i // binary_search end
// Find array start
__ la(array, at_bcp(3 * BytesPerInt)); // btw: should be able to // get rid of this // instruction (change // offsets below)
__ andi(array, array, -BytesPerInt);
// Initialize i & j
__ mv(i, zr); // i = 0
__ lwu(j, Address(array, -BytesPerInt)); // j = length(array)
// Convert j into native byteordering
__ revb_w(j, j);
// And start
Label entry;
__ j(entry);
// binary search loop
{
Label loop;
__ bind(loop);
__ addw(h, i, j); // h = i + j
__ srliw(h, h, 1); // h = (i + j) >> 1 // if [key < array[h].fast_match()] // then [j = h] // else [i = h] // Convert array[h].match to native byte-ordering before compare
__ shadd(temp, h, array, temp, 3);
__ ld(temp, Address(temp, 0));
__ revb_w_w(temp, temp); // reverse bytes in word (32bit) and sign-extend
Label L_done, L_greater;
__ bge(key, temp, L_greater); // if [key < array[h].fast_match()] then j = h
__ mv(j, h);
__ j(L_done);
__ bind(L_greater); // if [key >= array[h].fast_match()] then i = h
__ mv(i, h);
__ bind(L_done);
// while [i + 1 < j]
__ bind(entry);
__ addiw(h, i, 1); // i + 1
__ blt(h, j, loop); // i + 1 < j
}
// end of binary search, result index is i (must check again!)
Label default_case; // Convert array[i].match to native byte-ordering before compare
__ shadd(temp, i, array, temp, 3);
__ ld(temp, Address(temp, 0));
__ revb_w_w(temp, temp); // reverse bytes in word (32bit) and sign-extend
__ bne(key, temp, default_case);
// entry found -> j = offset
__ shadd(temp, i, array, temp, 3);
__ lwu(j, Address(temp, BytesPerInt));
__ profile_switch_case(i, key, array);
__ revb_w_w(j, j); // reverse bytes in word (32bit) and sign-extend
// Issue a StoreStore barrier after all stores but before return // from any constructor for any class with a final field. We don't // know if this is a finalizer, so we always do so. if (_desc->bytecode() == Bytecodes::_return) {
__ membar(MacroAssembler::StoreStore);
}
// Narrow result if state is itos but result type is smaller. // Need to narrow in the return bytecode rather than in generate_return_entry // since compiled code callers expect the result to already be narrowed. if (state == itos) {
__ narrow(x10);
}
__ remove_activation(state);
__ ret();
}
// ---------------------------------------------------------------------------- // Volatile variables demand their effects be made known to all CPU's // in order. Store buffers on most chips allow reads & writes to // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode // without some kind of memory barrier (i.e., it's not sufficient that // the interpreter does not reorder volatile references, the hardware // also must not reorder them). // // According to the new Java Memory Model (JMM): // (1) All volatiles are serialized wrt to each other. ALSO reads & // writes act as acquire & release, so: // (2) A read cannot let unrelated NON-volatile memory refs that // happen after the read float up to before the read. It's OK for // non-volatile memory refs that happen before the volatile read to // float down below it. // (3) Similar a volatile write cannot let unrelated NON-volatile // memory refs that happen BEFORE the write float down to after the // write. It's OK for non-volatile memory refs that happen after the // volatile write to float up before it. // // We only put in barriers around volatile refs (they are expensive), // not _between_ memory refs (that would require us to track the // flavor of the previous memory refs). Requirements (2) and (3) // require some barriers before volatile stores and after volatile // loads. These nearly cover requirement (1) but miss the // volatile-store-volatile-load case. This final case is placed after // volatile-stores although it could just as well go before // volatile-loads.
// Update registers with resolved info
__ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); // n.b. unlike x86 Rcache is now rcpool plus the indexed offset // so all clients ofthis method must be modified accordingly
__ bind(resolved);
// Class initialization barrier for static methods if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
__ load_resolved_method_at_index(byte_no, temp, Rcache);
__ load_method_holder(temp, temp);
__ clinit_barrier(temp, t0, NULL, &clinit_barrier_slow);
}
}
// The Rcache and index registers must be set before call // n.b unlike x86 cache already includes the index offset void TemplateTable::load_field_cp_cache_entry(Register obj, Register cache, Register index, Register off, Register flags, bool is_static = false) {
assert_different_registers(cache, index, flags, off);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.