/* * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
if (is_atomic) { // Atomics always need to be wrapped in CPU membars returntrue;
}
if (anonymous) { // We will need memory barriers unless we can determine a unique // alias category for this reference. (Note: If for some reason // the barriers get omitted and the unsafe reference begins to "pollute" // the alias analysis of the rest of the graph, either Compile::can_alias // or Compile::must_alias will throw a diagnostic assert.) if (is_mixed || !is_unordered || (mismatched && !_addr.type()->isa_aryptr())) { returntrue;
}
} else {
assert(!is_mixed, "not unsafe");
}
if (is_atomic) {
assert(kit != NULL, "unsupported at optimization time"); // Memory-model-wise, a LoadStore acts like a little synchronized // block, so needs barriers on each side. These don't translate // into actual barriers on most machines, but we still need rest of // compiler to respect ordering. if (is_release) {
_leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
} elseif (is_volatile) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
_leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
} else {
_leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
}
}
} elseif (is_write) { // If reference is volatile, prevent following memory ops from // floating down past the volatile write. Also prevents commoning // another volatile read. if (is_volatile || is_release) {
assert(kit != NULL, "unsupported at optimization time");
_leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
}
} else { // Memory barrier to prevent normal and 'unsafe' accesses from // bypassing each other. Happens after null checks, so the // exception paths do not take memory state from the memory barrier, // so there's no problems making a strong assert about mixing users // of safe & unsafe memory. if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
assert(kit != NULL, "unsupported at optimization time");
_leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
}
}
if (access.needs_cpu_membar()) {
assert(kit != NULL, "unsupported at optimization time");
kit->insert_mem_bar(Op_MemBarCPUOrder);
}
if (is_atomic) { // 4984716: MemBars must be inserted before this // memory node in order to avoid a false // dependency which will confuse the scheduler.
access.set_memory();
}
}
// If reference is volatile, prevent following volatiles ops from // floating up before the volatile access. if (_access.needs_cpu_membar()) {
kit->insert_mem_bar(Op_MemBarCPUOrder);
}
if (is_atomic) {
assert(kit != NULL, "unsupported at optimization time"); if (is_acquire || is_volatile) {
Node* n = _access.raw_access();
Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n); if (_leading_membar != NULL) {
MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
}
}
} elseif (is_write) { // If not multiple copy atomic, we do the MemBarVolatile before the load. if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
assert(kit != NULL, "unsupported at optimization time");
Node* n = _access.raw_access();
Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar if (_leading_membar != NULL) {
MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
}
}
} else { if (is_volatile || is_acquire) {
assert(kit != NULL, "unsupported at optimization time");
Node* n = _access.raw_access();
assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
mb->as_MemBar()->set_trailing_load();
}
}
}
};
if (AlwaysAtomicAccesses && is_unordered) {
_decorators &= ~MO_DECORATOR_MASK; // clear the MO bits
_decorators |= MO_RELAXED; // Force the MO_RELAXED decorator with AlwaysAtomicAccess
}
if (is_read && !is_write && anonymous) { // To be valid, unsafe loads may depend on other conditions than // the one that guards them: pin the Load node
_decorators |= C2_CONTROL_DEPENDENT_LOAD;
_decorators |= C2_UNKNOWN_CONTROL_LOAD; const TypePtr* adr_type = _addr.type();
Node* adr = _addr.node(); if (!needs_cpu_membar() && adr_type->isa_instptr()) {
assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
intptr_t offset = Type::OffsetBot;
AddPNode::Ideal_base_and_offset(adr, &gvn(), offset); if (offset >= 0) { int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->instance_klass()->layout_helper()); if (offset < s) { // Guaranteed to be a valid access, no need to pin it
_decorators ^= C2_CONTROL_DEPENDENT_LOAD;
_decorators ^= C2_UNKNOWN_CONTROL_LOAD;
}
}
}
}
}
// SCMemProjNodes represent the memory state of a LoadStore. Their
// main role is to prevent LoadStore nodes from being optimized away
// when their results aren't used.
void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
  // Pinning requires a GraphKit, which is only available while parsing.
  assert(access.is_parse_access(), "entry not supported at optimization time");
  GraphKit* kit = static_cast<C2ParseAccess&>(access).kit();

  Node* atomic_node = access.raw_access();
  assert(atomic_node != NULL, "must pin atomic op");

  // Project the memory state out of the LoadStore and install it as the
  // current memory for the access' alias category, so the LoadStore stays
  // reachable through the memory graph.
  Node* mem_proj = kit->gvn().transform(new SCMemProjNode(atomic_node));
  kit->set_memory(mem_proj, access.alias_idx());
}
int BarrierSetC2::arraycopy_payload_base_offset(bool is_array) { // Exclude the header but include array length to copy by 8 bytes words. // Can't use base_offset_in_bytes(bt) since basic type is unknown. int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
instanceOopDesc::base_offset_in_bytes(); // base_off: // 8 - 32-bit VM // 12 - 64-bit VM, compressed klass // 16 - 64-bit VM, normal klass if (base_off % BytesPerLong != 0) {
assert(UseCompressedClassPointers, ""); if (is_array) { // Exclude length to copy by 8 bytes words.
base_off += sizeof(int);
} else { // Include klass to copy by 8 bytes words.
base_off = instanceOopDesc::klass_offset_in_bytes();
}
assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
} return base_off;
}
// Load TLAB end. // // Note: We set the control input on "tlab_end" and "old_tlab_top" to work around // a bug where these values were being moved across // a safepoint. These are not oops, so they cannot be include in the oop // map, but they can be changed by a GC. The proper way to fix this would // be to set the raw memory state when generating a SafepointNode. However // this will require extensive changes to the loop optimization in order to // prevent a degradation of the optimization. // See comment in memnode.hpp, around line 227 in class LoadPNode.
Node* tlab_end = macro->make_load(toobig_false, mem, tlab_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);
// Load the TLAB top.
Node* old_tlab_top = new LoadPNode(toobig_false, mem, tlab_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered);
macro->transform_later(old_tlab_top);
// Add to heap top to get a new TLAB top
Node* new_tlab_top = new AddPNode(macro->top(), old_tlab_top, size_in_bytes);
macro->transform_later(new_tlab_top);
// Check against TLAB end
Node* tlab_full = new CmpPNode(new_tlab_top, tlab_end);
macro->transform_later(tlab_full);
Node* needgc_bol = new BoolNode(tlab_full, BoolTest::ge);
macro->transform_later(needgc_bol);
IfNode* needgc_iff = new IfNode(toobig_false, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN);
macro->transform_later(needgc_iff);
// Plug the failing-heap-space-need-gc test into the slow-path region
Node* needgc_true = new IfTrueNode(needgc_iff);
macro->transform_later(needgc_true);
needgc_ctrl = needgc_true;
// No need for a GC.
Node* needgc_false = new IfFalseNode(needgc_iff);
macro->transform_later(needgc_false);
// Store the modified TLAB top back down.
Node* store_tlab_top = new StorePNode(needgc_false, mem, tlab_top_adr,
TypeRawPtr::BOTTOM, new_tlab_top, MemNode::unordered);
macro->transform_later(store_tlab_top);
// NOTE(review): The lines below are extraneous residue from a web page (a
// German content disclaimer), not part of this source file. Commented out so
// the file stays syntactically valid; preserved verbatim for traceability:
//   Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
//   zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
//   noch Qualität der bereitgestellten Informationen zugesichert.
//   Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
//   experimentell.