/* * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
// NOTE(review): tail fragment — the enclosing function's signature (presumably
// a set_soft_reference_policy(bool clear)-style setter) and the declarations of
// always_clear_policy / lru_max_heap_policy are not visible in this chunk;
// confirm against the full file before editing.
if (clear) {
log_info(gc, ref)("Clearing All SoftReferences");
_soft_reference_policy = &always_clear_policy;
} else {
_soft_reference_policy = &lru_max_heap_policy;
}
_soft_reference_policy->setup();
}
// Returns true if the reference object is already inactive, i.e. will never
// need discovery/processing again. FinalReferences and other reference types
// signal inactivity differently, hence the split below.
// Fix: in the mangled original both return statements sat on the same physical
// line as a '//' comment and were therefore commented out.
bool ZReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
  if (type == REF_FINAL) {
    // A FinalReference is inactive if its next field is non-null. An application can't
    // call enqueue() or clear() on a FinalReference.
    return reference_next(reference) != NULL;
  } else {
    // A non-FinalReference is inactive if the referent is null. The referent can only
    // be null if the application called Reference.enqueue() or Reference.clear().
    return referent == NULL;
  }
}
if (is_inactive(reference, referent, type)) { returnfalse;
}
if (is_strongly_live(referent)) { returnfalse;
}
if (is_softly_live(reference, type)) { returnfalse;
}
// PhantomReferences with finalizable marked referents should technically not have // to be discovered. However, InstanceRefKlass::oop_oop_iterate_ref_processing() // does not know about the finalizable mark concept, and will therefore mark // referents in non-discovered PhantomReferences as strongly live. To prevent // this, we always discover PhantomReferences with finalizable marked referents. // They will automatically be dropped during the reference processing phase. returntrue;
}
// Returns true if the discovered reference should be dropped (not enqueued),
// either because the application already cleared/enqueued it, or because the
// referent turned out to still be alive.
// Fix: in the mangled original 'returntrue;' and the REF_PHANTOM branch were
// swallowed by '//' comments on the same physical line.
bool ZReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
  const oop referent = reference_referent(reference);
  if (referent == NULL) {
    // Reference has been cleared, by a call to Reference.enqueue()
    // or Reference.clear() from the application, which means we
    // should drop the reference.
    return true;
  }

  // Check if the referent is still alive, in which case we should
  // drop the reference.
  if (type == REF_PHANTOM) {
    return ZBarrier::is_alive_barrier_on_phantom_oop(referent);
  } else {
    return ZBarrier::is_alive_barrier_on_weak_oop(referent);
  }
}
// Transition a reference object to the inactive state. Non-final references
// are made inactive by clearing the referent; FinalReferences keep their
// referent (the Finalizer thread still needs it) and are instead marked
// inactive by self-looping the next field.
void ZReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
  if (type != REF_FINAL) {
    // Clear referent
    reference_clear_referent(reference);
    return;
  }

  // Don't clear referent. It is needed by the Finalizer thread to make the call
  // to finalize(). A FinalReference is instead made inactive by self-looping the
  // next field. An application can't call FinalReference.enqueue(), so there is
  // no race to worry about when setting the next field.
  assert(reference_next(reference) == NULL, "Already inactive");
  reference_set_next(reference, reference);
}
// NOTE(review): tail fragment of a discover routine — the enclosing function's
// signature is not visible in this chunk; 'reference' and 'type' are bound by
// the missing header above.
// Fix: in the mangled original the 'referent_addr' declaration was trapped
// inside the preceding '//' comment on the same physical line.
if (type == REF_FINAL) {
  // Mark referent (and its reachable subgraph) finalizable. This avoids
  // the problem of later having to mark those objects if the referent is
  // still final reachable during processing.
  volatile oop* const referent_addr = reference_referent_addr(reference);
  ZBarrier::mark_barrier_on_oop_field(referent_addr, true /* finalizable */);
}

// Add reference to discovered list
assert(reference_discovered(reference) == NULL, "Already discovered");
oop* const list = _discovered_list.addr();
reference_set_discovered(reference, *list);
*list = reference;
}
// Make reference inactive
make_inactive(reference, type);
// Return next in list return reference_discovered_addr(reference);
}
// Process this worker's discovered-reference list: drop references whose
// referents are still alive (or were cleared by the application), keep the
// rest, then prepend the surviving list onto the shared pending list.
// Fix: in the mangled original 'if (*list != NULL) {' was swallowed by the
// '// Prepend…' comment on the same physical line, unbalancing the braces.
void ZReferenceProcessor::work() {
  // Process discovered references
  oop* const list = _discovered_list.addr();
  oop* p = list;

  while (*p != NULL) {
    const oop reference = *p;
    const ReferenceType type = reference_type(reference);

    if (should_drop(reference, type)) {
      *p = drop(reference, type);
    } else {
      p = keep(reference, type);
    }
  }

  // Prepend discovered references to internal pending list
  if (*list != NULL) {
    *p = Atomic::xchg(_pending_list.addr(), *list);
    if (*p == NULL) {
      // First to prepend to list, record tail
      _pending_list_tail = p;
    }

    // Clear discovered list
    *list = NULL;
  }
}
// Returns true if no references are pending processing: every worker's
// discovered list is empty and the internal pending list is empty.
// Fix: fused 'returnfalse;'/'returntrue;' tokens in the mangled original.
bool ZReferenceProcessor::is_empty() const {
  ZPerWorkerConstIterator<oop> iter(&_discovered_list);
  for (const oop* list; iter.next(&list);) {
    if (*list != NULL) {
      return false;
    }
  }

  if (_pending_list.get() != NULL) {
    return false;
  }

  return true;
}
void ZReferenceProcessor::reset_statistics() {
assert(is_empty(), "Should be empty");
// Reset encountered
ZPerWorkerIterator<Counters> iter_encountered(&_encountered_count); for (Counters* counters; iter_encountered.next(&counters);) { for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
(*counters)[i] = 0;
}
}
// Reset discovered
ZPerWorkerIterator<Counters> iter_discovered(&_discovered_count); for (Counters* counters; iter_discovered.next(&counters);) { for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
(*counters)[i] = 0;
}
}
// Reset enqueued
ZPerWorkerIterator<Counters> iter_enqueued(&_enqueued_count); for (Counters* counters; iter_enqueued.next(&counters);) { for (int i = REF_SOFT; i <= REF_PHANTOM; i++) {
(*counters)[i] = 0;
}
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.