/* * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
template <class T> inlinevoid G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) { // We're not going to even bother checking whether the object is // already forwarded or not, as this usually causes an immediate // stall. We'll try to prefetch the object (for write, given that // we might need to install the forwarding reference) and we'll // get back to it when pop it from the queue
Prefetch::write(obj->mark_addr(), 0);
Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
// slightly paranoid test; I'm trying to catch potential // problems before we go into push_on_queue to know where the // problem is coming from
assert((obj == RawAccess<>::oop_load(p)) ||
(obj->is_forwarded() &&
obj->forwardee() == RawAccess<>::oop_load(p)), "p should still be pointing to obj or to its forwardee");
template <class T> inlinestaticvoid check_obj_during_refinement(T* p, oop const obj) { #ifdef ASSERT
G1CollectedHeap* g1h = G1CollectedHeap::heap(); // can't do because of races // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
assert(is_object_aligned(obj), "obj must be aligned");
assert(g1h->is_in(obj), "invariant");
assert(g1h->is_in(p), "invariant"); #endif// ASSERT
}
template <class T> inlinevoid G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
T o = RawAccess<MO_RELAXED>::oop_load(p); if (CompressedOops::is_null(o)) { return;
}
oop obj = CompressedOops::decode_not_null(o);
check_obj_during_refinement(p, obj);
if (HeapRegion::is_in_same_region(p, obj)) { // Normally this closure should only be called with cross-region references. // But since Java threads are manipulating the references concurrently and we // reload the values things may have changed. // Also this check lets slip through references from a humongous continues region // to its humongous start region, as they are in different regions, and adds a // remembered set entry. This is benign (apart from memory usage), as we never // try to either evacuate or eager reclaim humonguous arrays of j.l.O. return;
}
template <class T> inlinevoid G1ScanCardClosure::do_oop_work(T* p) {
T o = RawAccess<>::oop_load(p); if (CompressedOops::is_null(o)) { return;
}
oop obj = CompressedOops::decode_not_null(o);
check_obj_during_refinement(p, obj);
assert(!_g1h->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
p2i(p), _g1h->addr_to_region(p));
const G1HeapRegionAttr region_attr = _g1h->region_attr(obj); if (region_attr.is_in_cset()) { // Since the source is always from outside the collection set, here we implicitly know // that this is a cross-region reference too.
prefetch_and_push(p, obj);
_heap_roots_found++;
} elseif (!HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(region_attr, p, obj);
_par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
}
}
template <class T> inlinevoid G1ScanRSForOptionalClosure::do_oop_work(T* p) { const G1HeapRegionAttr region_attr = _g1h->region_attr(p); // Entries in the optional collection set may start to originate from the collection // set after one or more increments. In this case, previously optional regions // became actual collection set regions. Filter them out here. if (region_attr.is_in_cset()) { return;
}
_scan_cl->do_oop_work(p);
_scan_cl->trim_queue_partially();
}
void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  // Only a copy that landed in a young region requires flagging the
  // scanned CLD as having modified oops.
  HeapRegion* const dest = _g1h->heap_region_containing(new_obj);
  if (dest->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}
void G1ParCopyHelper::mark_object(oop obj) {
  // Collection-set objects are relocated, so they must never be marked
  // through this path.
  assert(!_g1h->heap_region_containing(obj)->in_collection_set(),
         "should not mark objects in the CSet");

  // The object will not move, so it is safe to read its size while marking.
  _cm->mark_in_bitmap(_worker_id, obj);
}
// NOTE(review): this is the tail of an enclosing member function whose header
// and earlier body are missing from this chunk of the file (the surrounding
// braces do not balance here) — presumably a G1ParCopyClosure::do_oop_work
// variant. Confirm against the full file before editing; code tokens below
// are left byte-identical.
// The object is not in the collection set. should_mark is true iff the
// current closure is applied on strong roots (and weak roots when class
// unloading is disabled) in a concurrent mark start pause.
if (should_mark) {
mark_object(obj);
}
}
trim_queue_partially();
}
// NOTE(review): the German text below is website boilerplate (a content
// disclaimer from a code-hosting page) that appears to have been pasted into
// this source file by accident. It is not part of the code and should be
// removed; it is wrapped in a comment here so the file remains compilable.
/*
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
*/