// c2_MacroAssembler_x86.cpp
/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
  // WARNING: Initial instruction MUST be 5 bytes or longer so that
  // NativeJump::patch_verified_entry will be able to patch out the entry
  // code safely. The push to verify stack depth is ok at 5 bytes,
  // the frame allocation can be either 3 or 6 bytes. So if we don't do
  // stack bang then we must use the 6 byte frame allocation even if
  // we have no frame. :-(
  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return addr
  framesize -= wordSize;
  stack_bang_size -= wordSize;
  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (stack_bang_size > 0) {
    generate_stack_overflow_check(stack_bang_size);

    // We always push rbp, so that on return to interpreter rbp will be
    // restored correctly and we can correct the stack.
    push(rbp);
    // Save caller's stack pointer into RBP if the frame pointer is preserved.
    if (PreserveFramePointer) {
      mov(rbp, rsp);
    }
    // Remove word for ebp
    framesize -= wordSize;

    // Create frame
    if (framesize) {
      subptr(rsp, framesize);
    }
  } else {
    // Create frame (force generation of a 4 byte immediate value)
    subptr_imm32(rsp, framesize);

    // Save RBP register now.
    framesize -= wordSize;
    movptr(Address(rsp, framesize), rbp);
    // Save caller's stack pointer into RBP if the frame pointer is preserved.
    if (PreserveFramePointer) {
      movptr(rbp, rsp);
      if (framesize > 0) {
        addptr(rbp, framesize);
      }
    }
  }
  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
    framesize -= wordSize;
    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
  }
#ifndef _LP64
  // If method sets FPU control word do it now
  if (fp_mode_24b) {
    fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24()));
  }
  if (UseSSE >= 2 && VerifyFPU) {
    verify_FPU(0, "FPU stack must be clean on entry");
  }
#endif
#ifdef ASSERT
  if (VerifyStackAtCalls) {
    Label L;
    push(rax);
    mov(rax, rsp);
    andptr(rax, StackAlignmentInBytes-1);
    cmpptr(rax, StackAlignmentInBytes-wordSize);
    pop(rax);
    jcc(Assembler::equal, L);
    STOP("Stack is not properly aligned!");
    bind(L);
  }
#endif
  if (!is_stub) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
#ifdef _LP64
    if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL) {
      // We put the non-hot code of the nmethod entry barrier out-of-line in a stub.
      Label dummy_slow_path;
      Label dummy_continuation;
      Label* slow_path = &dummy_slow_path;
      Label* continuation = &dummy_continuation;
      if (!Compile::current()->output()->in_scratch_emit_size()) {
        // Use real labels from actual stub when not emitting code for the purpose of measuring its size
        C2EntryBarrierStub* stub = Compile::current()->output()->entry_barrier_table()->add_entry_barrier();
        slow_path = &stub->slow_path();
        continuation = &stub->continuation();
      }
      bs->nmethod_entry_barrier(this, slow_path, continuation);
    }
#else
    // Don't bother with out-of-line nmethod entry barrier stub for x86_32.
    bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */);
#endif
  }
}
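// Illustrative summary (not additional emitted code): with a stack bang the
// prologue above is roughly
//
//   generate_stack_overflow_check(bang_size);  // touch guard pages first
//   push(rbp);                                 // keeps the entry >= 5 bytes, patchable
//   if (PreserveFramePointer) mov(rbp, rsp);
//   subptr(rsp, framesize);                    // allocate the rest of the frame
//
// whereas without a bang the frame is allocated with a forced 4-byte immediate
// so that NativeJump::patch_verified_entry can still patch the entry safely.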
// Update rtm_counters based on abort status
// input: abort_status
//        rtm_counters (RTMLockingCounters*)
// flags are killed
void C2_MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
  if (PrintPreciseRTMLockingStatistics) {
    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
      Label check_abort;
      testl(abort_status, (1<<i));
      jccb(Assembler::equal, check_abort);
      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
      bind(check_abort);
    }
  }
}
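// In C terms the update above is roughly (sketch only; the real increments are
// the atomic_incptr calls emitted above):
//
//   rtm_counters->abort_count++;
//   if (PrintPreciseRTMLockingStatistics)
//     for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++)
//       if (abort_status & (1 << i))
//         rtm_counters->abortX_count[i]++;   // one counter per abort-reason bit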
// Branch if (random & (count-1) != 0), count is 2^n
// tmp, scr and flags are killed
void C2_MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
  assert(tmp == rax, "");
  assert(scr == rdx, "");
  rdtsc(); // modifies EDX:EAX
  andptr(tmp, count-1);
  jccb(Assembler::notZero, brLabel);
}
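// Equivalent C sketch: take the branch on all but roughly 1-in-'count'
// executions, using the low bits of the time-stamp counter as a cheap
// pseudo-random source:
//
//   if ((rdtsc() & (count - 1)) != 0) goto brLabel;   // count must be 2^n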
// Perform abort ratio calculation, set no_rtm bit if high ratio
// input:  rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void C2_MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg, Register rtm_counters_Reg,
                                                    RTMLockingCounters* rtm_counters,
                                                    Metadata* method_data) {
  Label L_done, L_check_always_rtm1, L_check_always_rtm2;

  if (RTMLockingCalculationDelay > 0) {
    // Delay calculation
    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()));
    testptr(tmpReg, tmpReg);
    jccb(Assembler::equal, L_done);
  }
  // Abort ratio calculation only if abort_count > RTMAbortThreshold
  //   Aborted transactions = abort_count * 100
  //   All transactions = total_count *  RTMTotalCountIncrRate
  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
  cmpptr(tmpReg, RTMAbortThreshold);
  jccb(Assembler::below, L_check_always_rtm2);
  imulptr(tmpReg, tmpReg, 100);

  Register scrReg = rtm_counters_Reg;
  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
  imulptr(scrReg, scrReg, RTMAbortRatio);
  cmpptr(tmpReg, scrReg);
  jccb(Assembler::below, L_check_always_rtm1);
  if (method_data != NULL) {
    // set rtm_state to "no rtm" in MDO
    mov_metadata(tmpReg, method_data);
    lock();
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
  }
  jmpb(L_done);

  bind(L_check_always_rtm1);
  // Reload RTMLockingCounters* address
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  bind(L_check_always_rtm2);
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
  jccb(Assembler::below, L_done);
  if (method_data != NULL) {
    // set rtm_state to "always rtm" in MDO
    mov_metadata(tmpReg, method_data);
    lock();
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
  }
  bind(L_done);
}
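// The decision above as a C sketch (abbreviating the locked MDO update):
//
//   if (abort_count > RTMAbortThreshold &&
//       abort_count * 100 >= total_count * RTMTotalCountIncrRate * RTMAbortRatio)
//     rtm_state |= NoRTM;       // abort ratio too high, disable RTM
//   else if (total_count >= RTMLockingThreshold / RTMTotalCountIncrRate)
//     rtm_state |= UseRTM;      // enough samples with an acceptable ratio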
// Update counters and perform abort ratio calculation
// input:  abort_status_Reg
// rtm_counters_Reg, flags are killed
void C2_MacroAssembler::rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                                      RTMLockingCounters* rtm_counters,
                                      Metadata* method_data, bool profile_rtm) {
  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
  // update rtm counters based on rax value at abort
  // reads abort_status_Reg, updates flags
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
  if (profile_rtm) {
    // Save abort status because abort_status_Reg is used by following code.
    if (RTMRetryCount > 0) {
      push(abort_status_Reg);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
    // restore abort status
    if (RTMRetryCount > 0) {
      pop(abort_status_Reg);
    }
  }
}
// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
//       : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void C2_MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
  Label doneRetry;
  assert(abort_status_Reg == rax, "");
  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
  // if reason is in 0x6 and retry count != 0 then retry
  andptr(abort_status_Reg, 0x6);
  jccb(Assembler::zero, doneRetry);
  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  pause();
  decrementl(retry_count_Reg);
  jmp(retryLabel);
  bind(doneRetry);
}
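// C sketch of the retry policy above:
//
//   if ((abort_status & 0x6) != 0 && retry_count != 0) {
//     pause();                  // spin-loop hint to the CPU
//     retry_count--;
//     goto retryLabel;          // abort was transient, retry the transaction
//   }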
// Spin and retry if lock is busy,
// inputs: box_Reg (monitor address)
//       : retry_count_Reg
// output: retry_count_Reg decremented by 1
//       : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void C2_MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                               Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
  Label SpinLoop, SpinExit, doneRetry;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  decrementl(retry_count_Reg);
  movptr(tmp_Reg, RTMSpinLoopCount);

  bind(SpinLoop);
  pause();
  decrementl(tmp_Reg);
  jccb(Assembler::zero, SpinExit);
  movptr(scr_Reg, Address(box_Reg, owner_offset));
  testptr(scr_Reg, scr_Reg);
  jccb(Assembler::notZero, SpinLoop);

  bind(SpinExit);
  jmp(retryLabel);
  bind(doneRetry);
  incrementl(retry_count_Reg); // clear z flag
}
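// C sketch of the spin-and-retry above: wait up to RTMSpinLoopCount pause
// iterations for the owner to clear, then retry; if the retry budget is
// exhausted, restore the count so ZF ends up clear for the caller:
//
//   if (retry_count-- != 0) {
//     for (int spin = RTMSpinLoopCount; --spin != 0; ) {
//       pause();
//       if (monitor->owner == NULL) break;
//     }
//     goto retryLabel;          // retry whether owner cleared or we spun out
//   }
//   retry_count++;              // clears ZF: caller takes the no-retry path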
// Use RTM for normal stack locks
// Input: objReg (object to lock)
void C2_MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
                                          Register retry_on_abort_count_Reg,
                                          RTMLockingCounters* stack_rtm_counters,
                                          Metadata* method_data, bool profile_rtm,
                                          Label& DONE_LABEL, Label& IsInflated) {
  assert(UseRTMForStackLocks, "why call this otherwise?");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;

  if (RTMRetryCount > 0) {
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
  testptr(tmpReg, markWord::monitor_value);  // inflated vs stack-locked|neutral
  jcc(Assembler::notZero, IsInflated);

  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
    }
    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
  andptr(tmpReg, markWord::lock_mask_in_place);                     // look at 2 lock bits
  cmpptr(tmpReg, markWord::unlocked_value);                         // bits = 01 unlocked
  jcc(Assembler::equal, DONE_LABEL);                                // all done if unlocked

  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (UseRTMXendForLockBusy) {
    xend();
    movptr(abort_status_Reg, 0x2);    // Set the abort status to 2 (so we can retry)
    jmp(L_decrement_retry);
  } else {
    xabort(0);
  }
  bind(L_on_abort);
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
  }
  bind(L_decrement_retry);
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }
}
  // Appears unlocked - try to swing _owner from null to non-null.
  // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
#ifdef _LP64
  Register threadReg = r15_thread;
#else
  get_thread(scrReg);
  Register threadReg = scrReg;
#endif
  lock();
  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg

  if (RTMRetryCount > 0) {
    // success done else retry
    jccb(Assembler::equal, DONE_LABEL);
    bind(L_decrement_retry);
    // Spin and retry if lock is busy.
    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
  } else {
    bind(L_decrement_retry);
  }
}
#endif // INCLUDE_RTM_OPT
// fast_lock and fast_unlock used by C2
// Because the transitions from emitted code to the runtime
// monitorenter/exit helper stubs are so slow it's critical that
// we inline both the stack-locking fast path and the inflated fast path.
//
// See also: cmpFastLock and cmpFastUnlock.
//
// What follows is a specialized inline transliteration of the code
// in enter() and exit(). If we're concerned about I$ bloat another
// option would be to emit TrySlowEnter and TrySlowExit methods
// at startup-time. These methods would accept arguments as
// (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
// indications in the icc.ZFlag. fast_lock and fast_unlock would simply
// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
// In practice, however, the # of lock sites is bounded and is usually small.
// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
// if the processor uses simple bimodal branch predictors keyed by EIP,
// since the helper routines would be called from multiple synchronization
// sites.
//
// An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
// to those specialized methods. That'd give us a mostly platform-independent
// implementation that the JITs could optimize and inline at their pleasure.
// Done correctly, the only time we'd need to cross to native code would be
// to park() or unpark() threads. We'd also need a few more unsafe operators
// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
// (b) explicit barriers or fence operations.
//
// TODO:
//
// * Arrange for C2 to pass "Self" into fast_lock and fast_unlock in one of the registers (scr).
//   This avoids manifesting the Self pointer in the fast_lock and fast_unlock terminals.
//   Given TLAB allocation, Self is usually manifested in a register, so passing it into
//   the lock operators would typically be faster than reifying Self.
//
// * Ideally I'd define the primitives as:
//   fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
//   fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED.
//   Unfortunately ADLC bugs prevent us from expressing the ideal form.
//   Instead, we're stuck with the rather awkward and brittle register assignments below.
//   Furthermore the register assignments are overconstrained, possibly resulting in
//   sub-optimal code near the synchronization site.
//
// * Eliminate the sp-proximity tests and just use "== Self" tests instead.
//   Alternately, use a better sp-proximity test.
//
// * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
//   Either one is sufficient to uniquely identify a thread.
//   TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
//
// * Intrinsify notify() and notifyAll() for the common cases where the
//   object is locked by the calling thread but the waitlist is empty.
//   Avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
//
// * Use jccb and jmpb instead of jcc and jmp to improve code density.
//   But beware of excessive branch density on AMD Opterons.
//
// * Both fast_lock and fast_unlock set the ICC.ZF to indicate success
//   or failure of the fast path. If the fast path fails then we pass
//   control to the slow path, typically in C. In fast_lock and
//   fast_unlock we often branch to DONE_LABEL, just to find that C2
//   will emit a conditional branch immediately after the node.
//   So we have branches to branches and lots of ICC.ZF games.
//   Instead, it might be better to have C2 pass a "FailureLabel"
//   into fast_lock and fast_unlock. In the case of success, control
//   will drop through the node. ICC.ZF is undefined at exit.
//   In the case of failure, the node will branch directly to the
//   FailureLabel.

// Possible cases that we'll encounter in fast_lock
// ------------------------------------------------
// * Inflated
//    -- unlocked
//    -- Locked
//       = by self
//       = by other
// * neutral
// * stack-locked
//    -- by self
//       = sp-proximity test hits
//       = sp-proximity test generates false-negative
//    -- by other
    // Recursive locking.
    // The object is stack-locked: markword contains stack pointer to BasicLock.
    // Locked by current thread if difference with current SP is less than one page.
    subptr(tmpReg, rsp);
    // Next instruction set ZFlag == 1 (Success) if difference is less than one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
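    // The mask is ~(page_size-1) with the low lock/alignment bits kept (e.g.
    // 0xFFFFF003 for 4K pages on 32-bit). ZF == 1 exactly when mark - rsp is a
    // small, properly aligned positive offset, i.e. the displaced header lives
    // within one page of our own SP: a recursive stack-lock by this thread.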
movptr(Address(boxReg, 0), tmpReg);
  } else {
    // Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
testptr(objReg, objReg);
}
jmp(DONE_LABEL);
  bind(IsInflated);
  // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markWord::monitor_value

#if INCLUDE_RTM_OPT
  // Use the same RTM locking code in 32- and 64-bit VM.
  if (use_rtm) {
rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
rtm_counters, method_data, profile_rtm, DONE_LABEL);
  } else {
#endif // INCLUDE_RTM_OPT

#ifndef _LP64
    // The object is inflated.

    // boxReg refers to the on-stack BasicLock in the current frame.
    // We'd like to write:
    //   set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
    // This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
    // additional latency as we have another ST in the store buffer that must drain.

    // avoid ST-before-CAS
    // register juggle because we need tmpReg for cmpxchgptr below
movptr(scrReg, boxReg);
movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
    // Appears unlocked - try to swing _owner from null to non-null.
    // Ideally, I'd manifest "Self" with get_thread and then attempt
    // to CAS the register containing Self into m->Owner.
    // But we don't have enough registers, so instead we can either try to CAS
    // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
    // we later store "Self" into m->Owner. Transiently storing a stack address
    // (rsp or the address of the box) into m->owner is harmless.
    // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
lock();
cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    movptr(Address(scrReg, 0), 3);  // box->_displaced_header = 3
    // If we weren't able to swing _owner from NULL to the BasicLock
    // then take the slow path.
    jccb(Assembler::notZero, NO_COUNT);
    // update _owner from BasicLock to thread
get_thread (scrReg); // beware: clobbers ICCs
movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success
    // If the CAS fails we can either retry or pass control to the slow path.
    // We use the latter tactic.
    // Pass the CAS result in the icc.ZFlag into DONE_LABEL
    // If the CAS was successful ...
    //   Self has acquired the lock
    //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
    // Intentional fall-through into DONE_LABEL ...
#else // _LP64
    // It's inflated and we use scrReg for ObjectMonitor* in this section.
movq(scrReg, tmpReg);
xorq(tmpReg, tmpReg);
lock();
    cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    // Unconditionally set box->_displaced_header = markWord::unused_mark().
    // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
    movptr(Address(boxReg, 0), checked_cast<int32_t>(markWord::unused_mark().value()));
    // Propagate ICC.ZF from CAS above into DONE_LABEL.
jccb(Assembler::equal, COUNT); // CAS above succeeded; propagate ZF = 1 (success)
cmpptr(r15_thread, rax); // Check if we are already the owner (recursive lock)
jccb(Assembler::notEqual, NO_COUNT); // If not recursive, ZF = 0 at this point (fail)
incq(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
    xorq(rax, rax); // Set ZF = 1 (success) for recursive lock, denoting locking success
#endif // _LP64
#if INCLUDE_RTM_OPT
  } // use_rtm()
#endif
bind(DONE_LABEL);
  // ZFlag == 1 count in fast path
  // ZFlag == 0 count in slow path
  jccb(Assembler::notZero, NO_COUNT); // jump if ZFlag == 0

  bind(COUNT);
  // Count monitors in fast path
#ifndef _LP64
  get_thread(tmpReg);
  incrementl(Address(tmpReg, JavaThread::held_monitor_count_offset()));
#else // _LP64
  incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
#endif

  xorl(tmpReg, tmpReg); // Set ZF == 1
bind(NO_COUNT);
  // At NO_COUNT the icc ZFlag is set as follows ...
  // fast_unlock uses the same protocol.
  // ZFlag == 1 -> Success
  // ZFlag == 0 -> Failure - force control through the slow path
}
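// Sketch of the contract with the consuming C2 node (cmpFastLock): the code
// above leaves the outcome in ZF and C2 emits a conditional branch right
// after it, along the lines of
//
//   fast_lock(obj, box, tmp, ...);   // sets ZF
//   jne  slow_path;                  // ZF == 0: call runtime monitorenter
//   ...                              // ZF == 1: lock acquired, fall through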
// obj: object to unlock
// box: box address (displaced header location), killed. Must be EAX.
// tmp: killed, cannot be obj nor box.
//
// Some commentary on balanced locking:
//
// fast_lock and fast_unlock are emitted only for provably balanced lock sites.
// Methods that don't have provably balanced locking are forced to run in the
// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
// The interpreter provides two properties:
// I1: At return-time the interpreter automatically and quietly unlocks any
//     objects acquired in the current activation (frame). Recall that the
//     interpreter maintains an on-stack list of locks currently held by
//     a frame.
// I2: If a method attempts to unlock an object that is not held by the
//     frame the interpreter throws IMSX.
//
// Lets say A(), which has provably balanced locking, acquires O and then calls B().
// B() doesn't have provably balanced locking so it runs in the interpreter.
// Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
// is still locked by A().
//
// The only other source of unbalanced locking would be JNI. The "Java Native Interface:
// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
// should not be unlocked by "normal" java-level locking and vice-versa. The specification
// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
// Arguably given that the spec legislates the JNI case as undefined our implementation
// could reasonably *avoid* checking owner in fast_unlock().
// In the interest of performance we elide the m->Owner == Self check in unlock.
// A perfectly viable alternative is to elide the owner check except when
// Xcheck:jni is enabled.
  // Despite our balanced locking property we still check that m->_owner == Self
  // as java routines or native JNI code called by this thread might
  // have released the lock.
  // Refer to the comments in synchronizer.cpp for how we might encode extra
  // state in _succ so we can avoid fetching EntryList|cxq.
  //
  // If there's no contention try a 1-0 exit. That is, exit without
  // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
  // we detect and recover from the race that the 1-0 exit admits.
  //
  // Conceptually fast_unlock() must execute a STST|LDST "release" barrier
  // before it STs null into _owner, releasing the lock. Updates
  // to data protected by the critical section must be visible before
  // we drop the lock (and thus before any other thread could acquire
  // the lock and observe the fields protected by the lock).
  // IA32's memory-model is SPO, so STs are ordered with respect to
  // each other and there's no need for an explicit barrier (fence).
  // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
#ifndef _LP64
  // Note that we could employ various encoding schemes to reduce
  // the number of loads below (currently 4) to just 2 or 3.
  // Refer to the comments in synchronizer.cpp.
  // In practice the chain of fetches doesn't seem to impact performance, however.
xorptr(boxReg, boxReg);
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
jccb (Assembler::notZero, DONE_LABEL);
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
jccb (Assembler::notZero, DONE_LABEL);
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
  jmpb(DONE_LABEL);
#else // _LP64
  // It's inflated
  Label CheckSucc, LNotRecursive, LSuccess, LGoSlowPath;

  cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
  jccb(Assembler::equal, LNotRecursive);

  // Recursive inflated unlock
  decq(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
  jmpb(LSuccess);

  bind(LNotRecursive);
movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
  jccb(Assembler::notZero, CheckSucc);
  // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
jmpb (DONE_LABEL);
// Try to avoid passing control into the slow_path ...
bind (CheckSucc);
  // The following optional optimization can be elided if necessary
  // Effectively: if (succ == null) goto slow path
  // The code reduces the window for a race, however,
  // and thus benefits performance.
cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
jccb (Assembler::zero, LGoSlowPath);
  xorptr(boxReg, boxReg);
  // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
  // Memory barrier/fence
  // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
  // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
  // This is faster on Nehalem and AMD Shanghai/Barcelona.
  // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
  // We might also restructure (ST Owner=0;barrier;LD _Succ) to
  // (mov box,0; xchgq box, &m->Owner; LD _succ) .
lock(); addl(Address(rsp, 0), 0);
  // Rare inopportune interleaving - race.
  // The successor vanished in the small window above.
  // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
  // We need to ensure progress and succession.
  // Try to reacquire the lock.
  // If that fails then the new owner is responsible for succession and this
  // thread needs to take no further action and can exit via the fast path (success).
  // If the re-acquire succeeds then pass control into the slow path.
  // As implemented, this latter mode is horrible because we generated more
  // coherence traffic on the lock *and* artificially extended the critical section
  // length by virtue of passing control into the slow path.

  // box is really RAX -- the following CMPXCHG depends on that binding
  // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
lock();
  cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
  // There's no successor so we tried to regrab the lock.
  // If that didn't work, then another thread grabbed the
  // lock so we're done (and exit was a success).
  jccb(Assembler::notEqual, LSuccess);
  // Intentional fall-through into slow path
bind (LGoSlowPath);
orl (boxReg, 1); // set ICC.ZF=0 to indicate failure
jmpb (DONE_LABEL);
bind (LSuccess);
testl (boxReg, 0); // set ICC.ZF=1 to indicate success
jmpb (DONE_LABEL);
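  // Flag gymnastics: "orl(boxReg, 1)" forces a non-zero result (ZF = 0,
  // failure), while "testl(boxReg, 0)" ANDs with zero (ZF = 1, success)
  // without modifying any register; both merely encode the verdict that
  // the branch after DONE_LABEL consumes.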
#endif
  if (!UseHeavyMonitors) {
bind (Stacked);
movptr(tmpReg, Address (boxReg, 0)); // re-fetch
lock();
    cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
    // Intentional fall-thru into DONE_LABEL
}
bind(DONE_LABEL);
  // ZFlag == 1 count in fast path
  // ZFlag == 0 count in slow path
  jccb(Assembler::notZero, NO_COUNT);

  bind(COUNT);
  // Count monitors in fast path
#ifndef _LP64
  get_thread(tmpReg);
  decrementl(Address(tmpReg, JavaThread::held_monitor_count_offset()));
#else // _LP64
  decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
#endif

  xorl(tmpReg, tmpReg); // Set ZF == 1
bind(NO_COUNT);
}
//-------------------------------------------------------------------------------------------
// Generic instructions support for use in .ad files C2 code generation
void C2_MacroAssembler::vabsnegd(int opcode, XMMRegister dst, XMMRegister src) {
  if (dst != src) {
    movdqu(dst, src);
  }
  if (opcode == Op_AbsVD) {
    andpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_mask()), noreg);
  } else {
    assert((opcode == Op_NegVD), "opcode should be Op_NegVD");
    xorpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), noreg);
  }
}
void C2_MacroAssembler::vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len) {
  if (opcode == Op_AbsVD) {
    vandpd(dst, src, ExternalAddress(StubRoutines::x86::vector_double_sign_mask()), vector_len, noreg);
  } else {
    assert((opcode == Op_NegVD), "opcode should be Op_NegVD");
    vxorpd(dst, src, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), vector_len, noreg);
  }
}
void C2_MacroAssembler::vabsnegf(int opcode, XMMRegister dst, XMMRegister src) {
  if (dst != src) {
    movdqu(dst, src);
  }
  if (opcode == Op_AbsVF) {
    andps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_mask()), noreg);
  } else {
    assert((opcode == Op_NegVF), "opcode should be Op_NegVF");
    xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), noreg);
  }
}
void C2_MacroAssembler::vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len) {
  if (opcode == Op_AbsVF) {
    vandps(dst, src, ExternalAddress(StubRoutines::x86::vector_float_sign_mask()), vector_len, noreg);
  } else {
    assert((opcode == Op_NegVF), "opcode should be Op_NegVF");
    vxorps(dst, src, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), vector_len, noreg);
  }
}
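// All four routines above use the IEEE-754 sign-bit trick: AND with a mask
// that clears the sign bit gives abs, XOR with a mask that has only the sign
// bit set gives neg. As a scalar sketch for doubles:
//
//   uint64_t abs_bits = bits & 0x7fffffffffffffffull;  // Op_AbsVD
//   uint64_t neg_bits = bits ^ 0x8000000000000000ull;  // Op_NegVD
//
// The vector_*_sign_mask / vector_*_sign_flip stubs hold these constants
// broadcast across all vector lanes.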
void C2_MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister shift) {
  switch (opcode) {
    case Op_RShiftVL:  psrlq(dst, shift); break; // using srl to implement sra on pre-avx512 systems
    case Op_LShiftVL:  psllq(dst, shift); break;
    case Op_URShiftVL: psrlq(dst, shift); break;
    default: assert(false, "%s", NodeClassNames[opcode]); break;
  }
}
void C2_MacroAssembler::load_iota_indices(XMMRegister dst, int vlen_in_bytes, BasicType bt) {
  // The iota indices are ordered by type B/S/I/L/F/D, and the offset between two types is 64.
  int offset = exact_log2(type2aelembytes(bt)) << 6;
  if (is_floating_point_type(bt)) {
    offset += 128;
  }
  ExternalAddress addr(StubRoutines::x86::vector_iota_indices() + offset);
  load_vector(dst, addr, vlen_in_bytes);
}
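// Worked example of the offset computation above: groups are ordered
// B/S/I/L/F/D with 64 bytes per group, so T_INT gives exact_log2(4) << 6 = 128,
// while T_FLOAT (also 4-byte lanes) lands at 128 + 128 = 256 because the
// floating-point groups follow the four integral ones.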
// Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.
void C2_MacroAssembler::reduce_operation_128(BasicType typ, int opcode, XMMRegister dst, XMMRegister src) {
  int vector_len = Assembler::AVX_128bit;
  switch (opcode) {
    case Op_AndReductionV:  pand(dst, src); break;
    case Op_OrReductionV:   por (dst, src); break;
    case Op_XorReductionV:  pxor(dst, src); break;
    case Op_MinReductionV:
      switch (typ) {
        case T_BYTE:  pminsb(dst, src); break;
        case T_SHORT: pminsw(dst, src); break;
// ... (remainder of file truncated: maximum size reached)