/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Includes this excerpt relies on. The original include section was cut from
// the excerpt; this is a minimal plausible set, not necessarily the full list
// from the upstream file.
#include "precompiled.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"

//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.  Returns the number of nmethods found.
//
int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
  int found = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      changes.mark_for_deoptimization(nm);
      found++;
    }
  }
  return found;
}

//
// Add an nmethod to the dependency context.
// It's possible that an nmethod has multiple dependencies on a klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void DependencyContext::add_dependent_nmethod(nmethod* nm) {
  assert_lock_strong(CodeCache_lock);
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
  }
  // Lock-free prepend of a new bucket at the list head (a Treiber-stack push):
  // retry until the CAS installs new_head over an unchanged head.
  nmethodBucket* new_head = new nmethodBucket(nm, NULL);
  for (;;) {
    nmethodBucket* head = Atomic::load(_dependency_context_addr);
    new_head->set_next(head);
    if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
      break;
    }
  }
  if (UsePerfData) {
    _perf_total_buckets_allocated_count->inc();
  }
}
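
// The retry loop above is a classic lock-free prepend (Treiber-stack push).
// Below is a minimal standalone sketch of the same pattern using std::atomic
// instead of HotSpot's Atomic wrapper; names and types are illustrative and
// the block is excluded from compilation.
#if 0
#include <atomic>

struct Node {
  int   value;
  Node* next;
};

void push(std::atomic<Node*>& head, Node* n) {
  Node* old_head = head.load(std::memory_order_relaxed);
  do {
    // Link the new node to the current head; if another thread won the race,
    // compare_exchange_weak refreshes old_head and we retry.
    n->next = old_head;
  } while (!head.compare_exchange_weak(old_head, n,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}
#endif
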
void DependencyContext::release(nmethodBucket* b) {
  // While no cleaning epoch is in progress (_cleaning_epoch == 0) it is safe
  // to delete the bucket immediately; otherwise defer it to the purge list.
  bool expunge = Atomic::load(&_cleaning_epoch) == 0;
  if (expunge) {
    assert_locked_or_safepoint(CodeCache_lock);
    delete b;
    if (UsePerfData) {
      _perf_total_buckets_deallocated_count->inc();
    }
  } else {
    // Mark the context as having stale entries, since it is not safe to
    // expunge the list right now. Push onto the purge list with the same
    // lock-free prepend used in add_dependent_nmethod().
    for (;;) {
      nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
      b->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
        break;
      }
    }
    if (UsePerfData) {
      _perf_total_buckets_stale_count->inc();
      _perf_total_buckets_stale_acc_count->inc();
    }
  }
}

//
// Reclaim all unused buckets.
//
void DependencyContext::purge_dependency_contexts() {
  int removed = 0;
  for (nmethodBucket* b = _purge_list; b != NULL;) {
    nmethodBucket* next = b->purge_list_next();
    removed++;
    delete b;
    b = next;
  }
  if (UsePerfData && removed > 0) {
    _perf_total_buckets_deallocated_count->inc(removed);
  }
  _purge_list = NULL;
}

//
// Cleanup a dependency context by unlinking and placing all dependents corresponding
// to is_unloading nmethods on a purge list, which will be deleted later when it is safe.
void DependencyContext::clean_unloading_dependents() {
  if (!claim_cleanup()) {
    // Somebody else is cleaning up this dependency context.
    return;
  }
  // Walk the nmethodBuckets and move dead entries on the purge list, which will
  // be deleted during ClassLoaderDataGraph::purge(). The walk itself does the
  // work: next_not_unloading() unlinks is_unloading entries and releases them
  // onto the purge list as a side effect.
  nmethodBucket* b = dependencies_not_unloading();
  while (b != NULL) {
    nmethodBucket* next = b->next_not_unloading();
    b = next;
  }
}

nmethodBucket* DependencyContext::release_and_get_next_not_unloading(nmethodBucket* b) {
  nmethodBucket* next = b->next_not_unloading();
  release(b);
  return next;
}

//
// Invalidate all dependencies in the context
void DependencyContext::remove_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  assert(b == NULL, "All dependents should be unloading");
}

int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(NULL);
  int marked = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0 && !nm->is_marked_for_deoptimization()) {
      nm->mark_for_deoptimization();
      marked++;
    }
    b = release_and_get_next_not_unloading(b);
  }
  return marked;
}

#ifndef PRODUCT
void DependencyContext::print_dependent_nmethods(bool verbose) {
  int idx = 0;
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
  }
}
#endif //PRODUCT

bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
  for (nmethodBucket* b = dependencies_not_unloading(); b != NULL; b = b->next_not_unloading()) {
    if (nm == b->get_nmethod()) {
#ifdef ASSERT
      int count = b->count();
      assert(count >= 0, "count shouldn't be negative: %d", count);
#endif
      return true;
    }
  }
  return false;
}

int nmethodBucket::decrement() {
  return Atomic::sub(&_count, 1);
}

// We use a monotonically increasing epoch counter to track the last epoch a given
// dependency context was cleaned. GC threads claim cleanup tasks by performing
// a CAS on this value.
bool DependencyContext::claim_cleanup() {
  uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch);
  uint64_t last_cleanup = Atomic::load(_last_cleanup_addr);
  if (last_cleanup >= cleaning_epoch) {
    return false;
  }
  return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
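
// claim_cleanup() is a "first CAS wins per epoch" claim. A minimal standalone
// equivalent with std::atomic (illustrative only; excluded from compilation):
#if 0
#include <atomic>
#include <cstdint>

bool claim(std::atomic<uint64_t>& last_cleanup, uint64_t cleaning_epoch) {
  uint64_t seen = last_cleanup.load();
  if (seen >= cleaning_epoch) {
    return false;  // already claimed for this epoch, or no cleaning active
  }
  // Exactly one thread succeeds in advancing last_cleanup to the epoch.
  return last_cleanup.compare_exchange_strong(seen, cleaning_epoch);
}
#endif
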
// Retrieve the first nmethodBucket that has a dependent that does not correspond to
// an is_unloading nmethod. Any nmethodBucket entries observed from the original head
// that is_unloading() will be unlinked and placed on the purge list.
nmethodBucket* DependencyContext::dependencies_not_unloading() {
  for (;;) {
    // Need acquire because the read value could come from a concurrent insert.
    nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
    if (head == NULL || !head->get_nmethod()->is_unloading()) {
      return head;
    }
    nmethodBucket* head_next = head->next();
    OrderAccess::loadload();
    if (Atomic::load(_dependency_context_addr) != head) {
      // Unstable load of head w.r.t. head->next
      continue;
    }
    if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(head);
    }
  }
}

// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
  Atomic::store(_dependency_context_addr, b);
}

// After the gc_prologue, the dependency contexts may be claimed by the GC
// and releasing of nmethodBucket entries will be deferred and placed on
// a purge list to be deleted later.
void DependencyContext::cleaning_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  uint64_t epoch = ++_cleaning_epoch_monotonic;
  Atomic::store(&_cleaning_epoch, epoch);
}

// The epilogue marks the end of dependency context cleanup by the GC,
// and also makes subsequent releases of nmethodBuckets cause immediate
// deletion. It is okay to delay calling of cleaning_end() to a concurrent
// phase, subsequent to the safepoint operation in which cleaning_start()
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
  uint64_t epoch = 0;
  Atomic::store(&_cleaning_epoch, epoch);
}
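
// Taken together, the comments above imply the following shape for a GC
// unloading cycle. This is a sketch inferred from this file's comments, not
// a copy of the actual GC call sites:
#if 0
DependencyContext::cleaning_start();            // in a safepoint: releases now defer
// ... concurrent cleaning: each context is claimed at most once per epoch ...
context->clean_unloading_dependents();
DependencyContext::cleaning_end();              // releases delete immediately again
DependencyContext::purge_dependency_contexts(); // e.g. during ClassLoaderDataGraph::purge()
#endif
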
// This function skips over nmethodBuckets in the list corresponding to
// nmethods that are is_unloading. This allows exposing a view of the
// dependents as-if they were already cleaned, despite being cleaned
// concurrently. Any entry observed that is_unloading() will be unlinked
// and placed on the purge list.
nmethodBucket* nmethodBucket::next_not_unloading() {
  for (;;) {
    // Do not need acquire because the loaded entry can never be
    // concurrently inserted.
    nmethodBucket* next = Atomic::load(&_next);
    if (next == NULL || !next->get_nmethod()->is_unloading()) {
      return next;
    }
    nmethodBucket* next_next = Atomic::load(&next->_next);
    OrderAccess::loadload();
    if (Atomic::load(&_next) != next) {
      // Unstable load of next w.r.t. next->next
      continue;
    }
    if (Atomic::cmpxchg(&_next, next, next_next) == next) {
      // Release is_unloading entries if unlinking was claimed
      DependencyContext::release(next);
    }
  }
}
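
// The unlink above is a "helping" lock-free removal: after reading
// next->next, the link is re-checked so that a stale successor is never
// installed, then a CAS claims the unlink. A standalone sketch of the
// pattern (illustrative only; 'dead' stands in for nm->is_unloading() and
// release() for the deferred deletion above):
#if 0
#include <atomic>

struct Node {
  std::atomic<Node*> next;
  bool dead;
};

void release(Node* n);  // deferred or immediate reclamation, as above

Node* next_live(std::atomic<Node*>& link) {
  for (;;) {
    Node* next = link.load();
    if (next == nullptr || !next->dead) {
      return next;
    }
    Node* next_next = next->next.load();
    // If link changed, next may already be unlinked and next_next stale.
    if (link.load() != next) {
      continue;
    }
    Node* expected = next;
    if (link.compare_exchange_strong(expected, next_next)) {
      release(next);  // we claimed the unlink
    }
  }
}
#endif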