/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
#ifndef PRODUCT #define COMMENT(x) do { __ block_comment(x); } while (0) #else #define COMMENT(x) #endif
NEEDS_CLEANUP // remove this definitions ? constRegister IC_Klass = t1; // where the IC klass is cached constRegister SYNC_header = x10; // synchronization header constRegister SHIFT_count = x10; // where count for shift operations must be
void LIR_Assembler::clinit_barrier(ciMethod* method) {
assert(VM_Version::supports_fast_class_init_checks(), "sanity");
assert(!method->holder()->is_not_initialized(), "initialization should have been started");
// we jump here if osr happens with the interpreter // state set up to continue at the beginning of the // loop that triggered osr - in particular, we have // the following registers setup: // // x12: osr buffer //
//build frame
ciMethod* m = compilation()->method();
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
// OSR buffer is // // locals[nlocals-1..0] // monitors[0..number_of_locks] // // locals is a direct copy of the interpreter frame so in the osr buffer // so first slot in the local array is the last local from the interpreter // and last slot is local[0] (receiver) from the interpreter // // Similarly with locks. The first lock slot in the osr buffer is the nth lock // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock // in the interpreter frame (the method lock if a sync method)
// Initialize monitors in the compiled activation. // x12: pointer to osr buffer // All other registers are dead at this point and the locals will be // copied into place by code emitted in the IR.
Register OSR_buf = osrBufferPointer()->as_pointer_register();
{
assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); int monitor_offset = BytesPerWord * method()->max_locals() +
(2 * BytesPerWord) * (number_of_locks - 1); // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in // the OSR buffer using 2 word entries: first the lock and then // the oop. for (int i = 0; i < number_of_locks; i++) { int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); #ifdef ASSERT // verify the interpreter's monitor has a non-null object
{
Label L;
__ ld(t0, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
__ bnez(t0, L);
__ stop("locked object is NULL");
__ bind(L);
} #endif// ASSERT
__ ld(x9, Address(OSR_buf, slot_offset + 0));
__ sd(x9, frame_map()->address_for_monitor_lock(i));
__ ld(x9, Address(OSR_buf, slot_offset + 1 * BytesPerWord));
__ sd(x9, frame_map()->address_for_monitor_object(i));
}
}
}
// inline cache check; done before the frame is built. int LIR_Assembler::check_icache() { Register receiver = FrameMap::receiver_opr->as_register(); Register ic_klass = IC_Klass; int start_offset = __ offset();
Label dont;
__ inline_cache_check(receiver, ic_klass, dont);
// if icache check fails, then jump to runtime routine // Note: RECEIVER must still contain the receiver!
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// We align the verified entry point unless the method body // (including its inline cache check) will fit in a single 64-byte // icache line. if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) { // force alignment after the cache check.
__ align(CodeEntryAlignment);
}
__ bind(dont); return start_offset;
}
// Materialize the jobject constant `o` in `reg`; a NULL handle is
// loaded as the zero value.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o != NULL) {
    __ movoop(reg, o);
  } else {
    __ mv(reg, zr);
  }
}
// This specifies the rsp decrement needed to build the frame int LIR_Assembler::initial_frame_size_in_bytes() const { // if rounding, must let FrameMap know!
int LIR_Assembler::emit_exception_handler() { // generate code for exception handler
address handler_base = __ start_a_stub(exception_handler_size()); if (handler_base == NULL) { // not enough space left for the handler
bailout("exception handler overflow"); return -1;
}
int offset = code_offset();
// the exception oop and pc are in x10, and x13 // no other registers need to be preserved, so invalidate them
__ invalidate_registers(false, true, true, false, true, true);
// check that there is really an exception
__ verify_not_null_oop(x10);
// Emit the code to remove the frame from the stack in the exception // unwind path. int LIR_Assembler::emit_unwind_handler() { #ifndef PRODUCT if (CommentedAssembly) {
_masm->block_comment("Unwind handler");
} #endif// PRODUCT
int offset = code_offset();
// Fetch the exception from TLS and clear out exception related thread state
__ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
__ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));
__ sd(zr, Address(xthread, JavaThread::exception_pc_offset()));
__ bind(_unwind_handler_entry);
__ verify_not_null_oop(x10); if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mv(x9, x10); // Preserve the exception
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
__ mv(x10, x9); // Restore the exception
}
// remove the activation and dispatch to the unwind handler
__ block_comment("remove_frame and dispatch to the unwind handler");
__ remove_frame(initial_frame_size_in_bytes());
__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
// Emit the slow path assembly if (stub != NULL) {
stub->emit_code(this);
}
return offset;
}
int LIR_Assembler::emit_deopt_handler() { // generate code for exception handler
address handler_base = __ start_a_stub(deopt_handler_size()); if (handler_base == NULL) { // not enough space left for the handler
bailout("deopt handler overflow"); return -1;
}
assert(src->is_register(), "should not call otherwise");
assert(dest->is_stack(), "should not call otherwise"); if (src->is_single_cpu()) { int index = dest->single_stack_ix(); if (is_reference_type(type)) {
__ sd(src->as_register(), stack_slot_address(index, c_sz64));
__ verify_oop(src->as_register());
} elseif (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
__ sd(src->as_register(), stack_slot_address(index, c_sz64));
} else {
__ sw(src->as_register(), stack_slot_address(index, c_sz32));
}
} elseif (src->is_double_cpu()) { int index = dest->double_stack_ix();
Address dest_addr_LO = stack_slot_address(index, c_sz64, lo_word_offset_in_bytes);
__ sd(src->as_register_lo(), dest_addr_LO);
} elseif (src->is_single_fpu()) { int index = dest->single_stack_ix();
__ fsw(src->as_float_reg(), stack_slot_address(index, c_sz32));
} elseif (src->is_double_fpu()) { int index = dest->double_stack_ix();
__ fsd(src->as_double_reg(), stack_slot_address(index, c_sz64));
} else {
ShouldNotReachHere();
}
}
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide) {
LIR_Address* to_addr = dest->as_address_ptr(); // t0 was used as tmp reg in as_Address, so we use t1 as compressed_src Register compressed_src = t1;
if (patch_code != lir_patch_none) {
deoptimize_trap(info); return;
}
if (is_reference_type(type)) {
__ verify_oop(src->as_register());
switch (type) { case T_FLOAT:
__ fsw(src->as_float_reg(), as_Address(to_addr)); break;
case T_DOUBLE:
__ fsd(src->as_double_reg(), as_Address(to_addr)); break;
case T_ARRAY: // fall through case T_OBJECT: if (UseCompressedOops && !wide) {
__ sw(compressed_src, as_Address(to_addr));
} else {
__ sd(compressed_src, as_Address(to_addr));
} break; case T_METADATA: // We get here to store a method pointer to the stack to pass to // a dtrace runtime call. This can't work on 64 bit with // compressed klass ptrs: T_METADATA can be compressed klass // ptr or a 64 bit method pointer.
ShouldNotReachHere();
__ sd(src->as_register(), as_Address(to_addr)); break; case T_ADDRESS:
__ sd(src->as_register(), as_Address(to_addr)); break; case T_INT:
__ sw(src->as_register(), as_Address(to_addr)); break; case T_LONG:
__ sd(src->as_register_lo(), as_Address(to_addr)); break; case T_BYTE: // fall through case T_BOOLEAN:
__ sb(src->as_register(), as_Address(to_addr)); break; case T_CHAR: // fall through case T_SHORT:
__ sh(src->as_register(), as_Address(to_addr)); break; default:
ShouldNotReachHere();
}
if (info != NULL) {
add_debug_info_for_null_check(null_check_here, info);
}
}
if (addr->base()->type() == T_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
}
if (patch_code != lir_patch_none) {
deoptimize_trap(info); return;
}
if (info != NULL) {
add_debug_info_for_null_check_here(info);
}
int null_check_here = code_offset(); switch (type) { case T_FLOAT:
__ flw(dest->as_float_reg(), as_Address(from_addr)); break; case T_DOUBLE:
__ fld(dest->as_double_reg(), as_Address(from_addr)); break; case T_ARRAY: // fall through case T_OBJECT: if (UseCompressedOops && !wide) {
__ lwu(dest->as_register(), as_Address(from_addr));
} else {
__ ld(dest->as_register(), as_Address(from_addr));
} break; case T_METADATA: // We get here to store a method pointer to the stack to pass to // a dtrace runtime call. This can't work on 64 bit with // compressed klass ptrs: T_METADATA can be a compressed klass // ptr or a 64 bit method pointer.
ShouldNotReachHere();
__ ld(dest->as_register(), as_Address(from_addr)); break; case T_ADDRESS:
__ ld(dest->as_register(), as_Address(from_addr)); break; case T_INT:
__ lw(dest->as_register(), as_Address(from_addr)); break; case T_LONG:
__ ld(dest->as_register_lo(), as_Address_lo(from_addr)); break; case T_BYTE:
__ lb(dest->as_register(), as_Address(from_addr)); break; case T_BOOLEAN:
__ lbu(dest->as_register(), as_Address(from_addr)); break; case T_CHAR:
__ lhu(dest->as_register(), as_Address(from_addr)); break; case T_SHORT:
__ lh(dest->as_register(), as_Address(from_addr)); break; default:
ShouldNotReachHere();
}
if (is_reference_type(type)) { if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
if (!UseZGC) { // Load barrier has not yet been applied, so ZGC can't verify the oop here
__ verify_oop(dest->as_register());
}
}
}
// Emit code for a three-operand LIR op: integer divide/remainder or a
// fused multiply-add (double or float precision).
// NOTE: in the garbled original the `case lir_irem:` label had been
// swallowed into the trailing `// fall through` comment; it is restored.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv: // fall through
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad:
      __ fmadd_d(op->result_opr()->as_double_reg(),
                 op->in_opr1()->as_double_reg(),
                 op->in_opr2()->as_double_reg(),
                 op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadd_s(op->result_opr()->as_float_reg(),
                 op->in_opr1()->as_float_reg(),
                 op->in_opr2()->as_float_reg(),
                 op->in_opr3()->as_float_reg());
      break;
    default:
      ShouldNotReachHere();
  }
}
// Update the receiver-type profile rows of `data` (within `md`, whose
// base address is in `mdo`) for the receiver klass held in `recv`.
// If the klass matches an existing row its counter is incremented;
// otherwise it is installed into the first empty row. Jumps to
// `update_done` once a row is updated; falls through if all rows are
// occupied by other klasses.
// NOTE: in the garbled original the second `for` loop header had been
// swallowed into the preceding line comment; it is restored.
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ bne(recv, t1, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ increment(data_addr, DataLayout::counter_increment);
    __ j(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ ld(t1, recv_addr);
    __ bnez(t1, next_test);
    __ sd(recv, recv_addr);
    __ mv(t1, DataLayout::counter_increment);
    __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ j(*update_done);
    __ bind(next_test);
  }
}
// Fetch the MethodData and the ReceiverTypeData profile entry for a
// type-check op into the out-parameters `md` and `data`.
void LIR_Assembler::data_check(LIR_OpTypeCheck *op, ciMethodData **md, ciProfileData **data) {
  ciMethod* profiled_method = op->profiled_method();
  assert(profiled_method != NULL, "Should have method");
  *md = profiled_method->method_data_or_null();
  guarantee(*md != NULL, "Sanity");
  const int bci = op->profiled_bci();
  *data = (*md)->bci_to_data(bci);
  assert(*data != NULL, "need data for type check");
  assert((*data)->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
}
// Slow-path portion of a type check: compares the klass of `obj` against
// `k`/`k_RInfo`, branching to `failure_target` or `success_target`
// (falling through on a successful cast when possible).
void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Register Rtmp1,
                                               Register k_RInfo, Register klass_RInfo,
                                               Label *failure_target, Label *success_target) {
  // Get the object's class; not a safepoint as the obj null check happens earlier.
  __ load_klass(klass_RInfo, obj);
  if (k->is_loaded()) {
    // See if we get an immediate positive hit.
    __ ld(t0, Address(klass_RInfo, int64_t(k->super_check_offset())));
    if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
      __ bne(k_RInfo, t0, *failure_target, /* is_far */ true);
      // Successful cast, fall through to profile or jump.
    } else {
      // See if we get an immediate positive hit.
      __ beq(k_RInfo, t0, *success_target);
      // Check for self.
      __ beq(klass_RInfo, k_RInfo, *success_target);

      __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
      __ sd(k_RInfo, Address(sp, 0));            // sub klass
      __ sd(klass_RInfo, Address(sp, wordSize)); // super klass
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      // Load result to k_RInfo.
      __ ld(k_RInfo, Address(sp, 0));
      __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
      // Result is a boolean.
      __ beqz(k_RInfo, *failure_target, /* is_far */ true);
      // Successful cast, fall through to profile or jump.
    }
  } else {
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // Call out-of-line instance of __ check_klass_subtype_slow_path(...).
    __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
    __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass
    __ sd(k_RInfo, Address(sp, 0));            // super klass
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    // Load result to k_RInfo.
    __ ld(k_RInfo, Address(sp, 0));
    __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
    // Result is a boolean.
    __ beqz(k_RInfo, *failure_target, /* is_far */ true);
    // Successful cast, fall through to profile or jump.
  }
}
// Align the code buffer before emitting a call of kind `code`.
void LIR_Assembler::align_call(LIR_Code code) {
  // With RVC a call instruction may get 2-byte aligned.
  // The address of the call instruction needs to be 4-byte aligned to
  // ensure that it does not span a cache line so that it can be patched.
  __ align(NativeInstruction::instruction_size);
}
// exception object is not added to oop map by LinearScan // (LinearScan assumes that no oops are in fixed registers)
info->add_register_oop(exceptionOop);
Runtime1::StubID unwind_id;
// get current pc information // pc is only needed if the method has an exception handler, the unwind code does not need it. if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) { // As no instructions have been generated yet for this LIR node it's // possible that an oop map already exists for the current offset. // In that case insert an dummy NOP here to ensure all oop map PCs // are unique. See JDK-8237483.
__ nop();
} int pc_for_athrow_offset = __ offset();
InternalAddress pc_for_athrow(__ pc());
__ relocate(pc_for_athrow.rspec(), [&] {
int32_t offset;
__ la_patchable(exceptionPC->as_register(), pc_for_athrow, offset);
__ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset);
});
add_call_info(pc_for_athrow_offset, info); // for exception handler
// Emit code for a lock (lir_lock) or unlock (lir_unlock) op. With
// UseHeavyMonitors, locking always goes through the slow-path stub.
// NOTE: two fixes to the garbled original — the `hdr`/`lock` register
// declarations and the `if (UseHeavyMonitors)` guard had been swallowed
// into a line comment (tokens restored), and the invalid `elseif`
// tokens are corrected to `} else if`.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (UseHeavyMonitors) {
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
      __ null_check(obj);
    }
    __ j(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // Add debug info for NullPointerException only if one is possible.
    int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
guarantee(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); Register mdo = op->mdo()->as_register();
__ mov_metadata(mdo, md->constant_encoding());
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); // Perform additional virtual call profiling for invokevirtual and // invokeinterface bytecodes if (op->should_profile_receiver_type()) {
assert(op->recv()->is_single_cpu(), "recv must be allocated"); Register recv = op->recv()->as_register();
assert_different_registers(mdo, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder(); if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { // We know the type that will be seen at this call site; we can // statically update the MethodData* rather than needing to do // dynamic tests on the receiver type // NOTE: we should probably put a lock around this search to // avoid collisions by concurrent compilations
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i; for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i); if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ increment(data_addr, DataLayout::counter_increment); return;
}
}
// Receiver type not found in profile data; select an empty slot // Note that this is less efficient than it should be because it // always does a write to the receiver part of the // VirtualCallData rather than just the first time for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i); if (receiver == NULL) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
__ mov_metadata(t1, known_klass->constant_encoding());
__ sd(t1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ increment(data_addr, DataLayout::counter_increment); return;
}
}
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, md, data, recv, &update_done); // Receiver did not match any saved receiver and there is no empty row for it. // Increment total counter to indicate polymorphic case.
__ increment(counter_addr, DataLayout::counter_increment);
__ ld(t1, mdo_addr);
__ xorr(tmp, tmp, t1);
__ andi(t0, tmp, TypeEntries::type_klass_mask); // klass seen before, nothing to do. The unknown bit may have been // set already but no need to check.
__ beqz(t0, next);
// already unknown. Nothing to do anymore.
__ andi(t0, tmp, TypeEntries::type_unknown);
__ bnez(t0, next);
if (TypeEntries::is_type_none(current_klass)) {
__ beqz(t1, none);
__ mv(t0, (u1)TypeEntries::null_seen);
__ beq(t0, t1, none); // There is a chance that the checks above (re-reading profiling // data from memory) fail if another thread has just set the // profiling to this obj's klass
__ membar(MacroAssembler::LoadLoad);
__ ld(t1, mdo_addr);
__ xorr(tmp, tmp, t1);
__ andi(t0, tmp, TypeEntries::type_klass_mask);
__ beqz(t0, next);
}
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
__ ld(tmp, mdo_addr); // already unknown. Nothing to do anymore.
__ andi(t0, tmp, TypeEntries::type_unknown);
__ bnez(t0, next);
}
// different than before. Cannot keep accurate profile.
__ ld(t1, mdo_addr);
__ ori(t1, t1, TypeEntries::type_unknown);
__ sd(t1, mdo_addr);
if (TypeEntries::is_type_none(current_klass)) {
__ j(next);
__ bind(none); // first time here. Set profile type.
__ sd(tmp, mdo_addr);
}
}
void LIR_Assembler::check_no_conflict(ciKlass* exact_klass, intptr_t current_klass, Register tmp,
Address mdo_addr, Label &next) { // There's a single possible klass at this profile point
assert(exact_klass != NULL, "should be"); if (TypeEntries::is_type_none(current_klass)) {
__ mov_metadata(tmp, exact_klass->constant_encoding());
__ ld(t1, mdo_addr);
__ xorr(tmp, tmp, t1);
__ andi(t0, tmp, TypeEntries::type_klass_mask);
__ beqz(t0, next); #ifdef ASSERT
{
Label ok;
__ ld(t0, mdo_addr);
__ beqz(t0, ok);
__ mv(t1, (u1)TypeEntries::null_seen);
__ beq(t0, t1, ok); // may have been set by another thread
__ membar(MacroAssembler::LoadLoad);
__ mov_metadata(t0, exact_klass->constant_encoding());
__ ld(t1, mdo_addr);
__ xorr(t1, t0, t1);
__ andi(t1, t1, TypeEntries::type_mask);
__ beqz(t1, ok);
__ stop("unexpected profiling mismatch");
__ bind(ok);
} #endif // first time here. Set profile type.
__ sd(tmp, mdo_addr);
} else {
assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
__ ld(tmp, mdo_addr); // already unknown. Nothing to do anymore.
__ andi(t0, tmp, TypeEntries::type_unknown);
__ bnez(t0, next);
assert(do_null || do_update, "why are we here?");
assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
assert_different_registers(tmp, t0, t1, mdo_addr.base());
// Emit a negate of `left` into `dest`, dispatching on operand kind
// (32/64-bit integer, single/double float). `tmp` must be unused.
// NOTE: the invalid `elseif` tokens in the garbled original are
// corrected to `} else if`.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());      // 32-bit integer negate
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo()); // 64-bit integer negate
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fneg_s(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fneg_d(dest->as_double_reg(), left->as_double_reg());
  }
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.