/* * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
jint GenCollectedHeap::initialize() { // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some // cases incorrectly returns the size in wordSize units rather than // HeapWordSize).
// NOTE(review): initialize() is truncated in this extraction -- only the
// leading guarantee is visible; the remainder of the body is missing.
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// Update the _full_collections_completed counter // at the end of a stop-world full GC. unsignedint GenCollectedHeap::update_full_collections_completed() {
assert(_full_collections_completed <= _total_full_collections, "Can't complete more collections than were started");
_full_collections_completed = _total_full_collections; return _full_collections_completed;
}
// Return true if any of the following is true: // . the allocation won't fit into the current young gen heap // . gc locker is occupied (jni critical section) // . heap memory is tight -- the most recent previous collection // was a full collection because a partial collection (would // have) failed and is likely to fail again bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
size_t young_capacity = _young_gen->capacity_before_gc(); return (word_size > heap_word_size(young_capacity))
|| GCLocker::is_active_and_needs_gc()
|| incremental_collection_failed();
}
// Attempt to satisfy an allocation of "size" words by expanding a
// generation.  The old gen gets the first shot (when it accepts the
// request); the young gen is tried only if the old gen produced nothing.
// Returns NULL if neither generation could be expanded far enough.
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = NULL;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL && _young_gen->should_allocate(size, is_tlab)) {
    result = _young_gen->expand_and_allocate(size, is_tlab);
  }
  assert(result == NULL || is_in_reserved(result), "result not in heap");
  return result;
}
// NOTE(review): This region is a whitespace-mangled extraction that splices
// together fragments of several functions (mem_allocate_work, a
// do_collection-style routine, and an allocation-failure handler) without
// their boundaries.  The code is preserved byte-for-byte; only comments were
// added.  Do not assume it compiles as-is: on many lines a trailing "//"
// comment has swallowed the code that followed it on the original line.
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size, bool is_tlab, bool* gc_overhead_limit_was_exceeded) { // In general gc_overhead_limit_was_exceeded should be false so // set it so here and reset it to true only if the gc time // limit is being exceeded as checked below.
*gc_overhead_limit_was_exceeded = false;
HeapWord* result = NULL;
// Loop until the allocation is satisfied, or unsatisfied after GC. for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
// First allocation attempt is lock-free.
Generation *young = _young_gen; if (young->should_allocate(size, is_tlab)) {
// Fast path: CAS-based parallel allocation in the young generation.
result = young->par_allocate(size, is_tlab); if (result != NULL) {
assert(is_in_reserved(result), "result not in heap"); return result;
}
}
uint gc_count_before; // Read inside the Heap_lock locked region.
{
MutexLocker ml(Heap_lock);
log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation"); // Note that only large objects get a shot at being // allocated in later generations. bool first_only = !should_try_older_generation_allocation(size);
result = attempt_allocation(size, is_tlab, first_only); if (result != NULL) {
assert(is_in_reserved(result), "result not in heap"); return result;
}
// GC-locker handling: a JNI critical section is active, so no GC may run.
if (GCLocker::is_active_and_needs_gc()) { if (is_tlab) { return NULL; // Caller will retry allocating individual object.
} if (!is_maximal_no_gc()) { // Try and expand heap to satisfy request.
result = expand_heap_and_allocate(size, is_tlab); // Result could be null if we are out of space. if (result != NULL) { return result;
}
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) { return NULL; // We didn't get to do a GC and we didn't get any memory.
}
// If this thread is not in a jni critical section, we stall // the requestor until the critical section has cleared and // GC allowed. When the critical section clears, a GC is // initiated by the last thread exiting the critical section; so // we retry the allocation sequence from the beginning of the loop, // rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current(); if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock); // Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
gclocker_stalled_count += 1; continue;
} else { if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while" " in jni critical section");
} return NULL;
}
}
// Read the gc count while the heap lock is held.
gc_count_before = total_collections();
}
// Slow path: hand the collection-for-allocation to the VM thread.
VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
VMThread::execute(&op); if (op.prologue_succeeded()) {
result = op.result(); if (op.gc_locked()) {
assert(result == NULL, "must be NULL if gc_locked() is true"); continue; // Retry and/or stall as necessary.
}
// Allocation has failed and a collection // has been done. If the gc time limit was exceeded the // this time, return NULL so that an out-of-memory // will be thrown. Clear gc_overhead_limit_exceeded // so that the overhead exceeded does not persist.
// NOTE(review): the remainder of mem_allocate_work is missing here; the
// lines below appear to belong to a different (collection) routine whose
// signature is not present in this extraction -- they reference locals
// (run_verification, gen, clear_soft_refs, full, complete, max_generation,
// do_full_collection, do_young_collection, pre_gc_values) declared
// elsewhere.
// Must be done anew before each collection because // a previous collection will do mangling and will // change top of some spaces.
record_gen_tops_before_GC();
if (run_verification && VerifyBeforeGC) {
Universe::verify("Before GC");
}
COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());
// Do collection work
{ // Note on ref discovery: For what appear to be historical reasons, // GCH enables and disabled (by enqueuing) refs discovery. // In the future this should be moved into the generation's // collect method so that ref discovery and enqueueing concerns // are local to a generation. The collect method could return // an appropriate indication in the case that notification on // the ref lock was needed. This will make the treatment of // weak refs more uniform (and indeed remove such concerns // from GCH). XXX
save_marks(); // save marks for all gens // We want to discover references, but not process them yet. // This mode is disabled in process_discovered_references if the // generation does some collection work, or in // enqueue_discovered_references if the generation returns // without doing any work.
ReferenceProcessor* rp = gen->ref_processor();
rp->start_discovery(clear_soft_refs);
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(my_thread->is_VM_thread(), "only VM thread");
assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
guarantee(!is_gc_active(), "collection is not reentrant");
if (GCLocker::check_active_before_gc()) { return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}
if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) { // Allocation request was met by young GC.
size = 0;
}
// Ask if young collection is enough. If so, do the final steps for young collection, // and fallthrough to the end.
do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation); if (!do_full_collection) { // Adjust generation sizes.
_young_gen->compute_new_size();
print_heap_change(pre_gc_values);
// Track memory usage and detect low memory after GC finishes
MemoryService::track_memory_usage();
gc_epilogue(complete);
}
print_heap_after_gc();
} else { // No young collection, ask if we need to perform Full collection.
do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
}
if (!do_young_collection) {
gc_prologue(complete);
increment_total_collections(complete);
}
// Accounting quirk: total full collections would be incremented when "complete" // is set, by calling increment_total_collections above. However, we also need to // account Full collections that had "complete" unset. if (!complete) {
increment_total_full_collections();
}
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge(/*at_safepoint*/true);
DEBUG_ONLY(MetaspaceUtils::verify();)
// Need to clear claim bits for the next mark.
ClassLoaderDataGraph::clear_claimed_marks();
// Resize the metaspace capacity after full collections
MetaspaceGC::compute_new_size();
update_full_collections_completed();
print_heap_change(pre_gc_values);
// Track memory usage and detect low memory after GC finishes
MemoryService::track_memory_usage();
// Need to tell the epilogue code we are done with Full GC, regardless what was // the initial value for "complete" flag.
gc_epilogue(true);
// NOTE(review): the lines below appear to come from yet another function
// (an allocation-failure handler); its signature is likewise missing from
// this extraction.
assert(size != 0, "Precondition violated"); if (GCLocker::is_active_and_needs_gc()) { // GC locker is active; instead of a collection we will attempt // to expand the heap, if there's room for expansion. if (!is_maximal_no_gc()) {
result = expand_heap_and_allocate(size, is_tlab);
} return result; // Could be null if we are out of space.
} elseif (!incremental_collection_will_fail(false/* don't consult_young */)) { // Do an incremental collection.
do_collection(false, // full false, // clear_all_soft_refs
size, // size
is_tlab, // is_tlab
GenCollectedHeap::OldGen); // max_generation
} else {
log_trace(gc)(" :: Trying full because partial may fail :: "); // Try a full collection; see delta for bug id 6266275 // for the original code and why this has been simplified // with from-space allocation criteria modified and // such allocation moved out of the safepoint path.
do_collection(true, // full false, // clear_all_soft_refs
size, // size
is_tlab, // is_tlab
GenCollectedHeap::OldGen); // max_generation
}
result = attempt_allocation(size, is_tlab, false/*first_only*/);
if (result != NULL) {
assert(is_in_reserved(result), "result not in heap"); return result;
}
// OK, collection failed, try expansion.
result = expand_heap_and_allocate(size, is_tlab); if (result != NULL) { return result;
}
// If we reach this point, we're really out of memory. Try every trick // we can to reclaim memory. Force collection of soft references. Force // a complete compaction of the heap. Any additional methods for finding // free memory should be here, especially if they are expensive. If this // attempt fails, an OOM exception will be thrown.
{
UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
result = attempt_allocation(size, is_tlab, false/* first_only */); if (result != NULL) {
assert(is_in_reserved(result), "result not in heap"); return result;
}
assert(!soft_ref_policy()->should_clear_all_soft_refs(), "Flag should have been handled and cleared prior to this point");
// What else? We might try synchronous finalization later. If the total // space available is large enough for the allocation, then a more // complete compaction phase than we've tried so far might be // appropriate. return NULL;
}
#ifdef ASSERT
// Debug-only closure: asserts that every oop it visits does not point into
// the part of the heap that a partial (young) collection would move.
// (Fix for extraction defects: the class definition had been fused onto the
// "#ifdef" line, and "virtual void" was fused into "virtualvoid".)
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  // Narrow oops are not expected here.
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
// Process GC roots according to the scanning option "so".
// NOTE(review): strong_roots, strong_cld_closure and weak_cld_closure are
// never used in the code visible here, and roots_from_code_p below is
// computed but never read.  This suggests the thread/CLD root processing
// that normally sits between these lines is missing from this extraction
// -- confirm against the original file before relying on this body.
void GenCollectedHeap::process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots) { // General roots.
assert(code_roots != NULL, "code root closure should always be set");
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
if (so & SO_ScavengeCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
// We only visit parts of the CodeCache when scavenging.
ScavengableNMethods::nmethods_do(code_roots);
} if (so & SO_AllCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
// CMSCollector uses this to do intermediate-strength collections. // We scan the entire code cache, since CodeCache::do_unloading is not called.
CodeCache::blobs_do(code_roots);
} // Verify that the code cache contents are not subject to // movement by a scavenging collection.
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}
// public collection interfaces void GenCollectedHeap::collect(GCCause::Cause cause) { // The caller doesn't have the Heap_lock
// NOTE(review): collect() is truncated in this extraction -- its signature
// was swallowed into the comment above and only this precondition assert
// of the body is visible; the rest of the function is missing.
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
GenerationType last_generation) {
do_collection(true, // full
clear_all_soft_refs, // clear_all_soft_refs
0, // size false, // is_tlab
last_generation); // last_generation // Hack XXX FIX ME !!! // A scavenge may not have been attempted, or may have // been attempted and failed, because the old gen was too full if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed"); // This time allow the old gen to be collected as well
do_collection(true, // full
clear_all_soft_refs, // clear_all_soft_refs
0, // size false, // is_tlab
OldGen); // last_generation
}
}
bool GenCollectedHeap::is_in_young(constvoid* p) const { bool result = p < _old_gen->reserved().start();
assert(result == _young_gen->is_in_reserved(p), "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p)); return result;
}
// Returns "TRUE" iff "p" points into the committed areas of the heap. bool GenCollectedHeap::is_in(constvoid* p) const { return _young_gen->is_in(p) || _old_gen->is_in(p);
}
#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
// (Fix for extraction defects: the definition had been swallowed into the
// "#ifdef" line's comment, "const void" was fused, and "#endif" shared a
// line with the closing brace.)
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif
// Return the Space containing "addr", checking the young generation first.
// Asserts (rather than returning NULL) if no space contains it.
// (Fix for extraction defect: "const void" was fused into "constvoid".)
Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}
// Return the start of the block containing "addr", delegating to whichever
// generation reserves that address.  "addr" must be inside the heap.
// (Fix for extraction defect: "const void" was fused into "constvoid".)
HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }
  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}
// Query whether the block starting at "addr" holds an object, dispatching
// to the generation whose reserved range covers the address.
bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  const bool in_young = _young_gen->is_in_reserved(addr);
  if (!in_young) {
    assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  }
  return in_young ? _young_gen->block_is_obj(addr)
                  : _old_gen->block_is_obj(addr);
}
// Total TLAB capacity of the heap.  In this heap only the young
// generation hands out TLABs, so delegate straight to it.
size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->tlab_capacity();
}
// TLAB space currently in use.  Only the young generation supports
// TLAB allocation, so its figure is the heap-wide figure.
size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->tlab_used();
}
// Upper bound on a TLAB allocation that needs no GC.  Delegates to the
// young generation, the only generation that supports TLABs here.
size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}
// Requires "*prev_ptr" to be non-NULL.  Unlinks and returns a block of
// minimal size from the list headed by "*prev_ptr".
// (Fixes: the signature had been swallowed into the comment above in the
// mangled original; the documented non-NULL precondition is now asserted;
// the locals are initialized so an empty list cannot dereference garbage.)
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  assert(*prev_ptr != NULL, "removeSmallestScratch requires a non-empty list");
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr = NULL, *smallest = NULL;
  ScratchBlock *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      // Remember the link pointing at the current minimum so we can
      // splice it out after the scan.
      smallest_ptr = prev_ptr;
      smallest = cur;
      min_size = smallest->num_words;
      first = false;
    }
    prev_ptr = &cur->next;
    cur = cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;   // Unlink the minimum from the list.
  return smallest;
}
// Sort the scratch block list headed by res into decreasing size order, // and set "res" to the result. staticvoid sort_scratch_list(ScratchBlock*& list) {
ScratchBlock* sorted = NULL;
ScratchBlock* unsorted = list; while (unsorted) {
ScratchBlock *smallest = removeSmallestScratch(&unsorted);
smallest->next = sorted;
sorted = smallest;
}
list = sorted;
}
// Accessor for the singleton heap instance.
// (Fix for extraction defect: the "return" statement had been swallowed
// into the trailing comment, leaving the function without a body.)
GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}
#if INCLUDE_SERIALGC
// Prepare both generations for a full compaction, threading a single
// CompactPoint through the old generation first and then the young one.
// (Fix for extraction defect: the function definition had been fused onto
// the "#if" directive line, making the directive ill-formed.)
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.