/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// If the cld has not been dirtied we know that there are
// no references into the young gen and we can skip it.
if (cld->has_modified_oops()) {
// Tell the closure which CLD is being scanned so that it can be dirtied
// if oops are left pointing into the young gen.
_scavenge_closure->set_scanned_cld(cld);
// Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(_scavenge_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
uintx size = _virtual_space.reserved_size();
_max_survivor_size = compute_survivor_size(size, SpaceAlignment);
_max_eden_size = size - (2*_max_survivor_size);
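// Illustrative example (assumed numbers, not taken from this file): with a
// 64M reserved young generation and a computed _max_survivor_size of 8M per
// survivor space, _max_eden_size would be 64M - 2 * 8M = 48M.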
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, bool clear_space, bool mangle_space) {
// If the spaces are being cleared (only done at heap initialization
// currently), the survivor spaces need not be empty.
// Otherwise, no care is taken for used areas in the survivor spaces
// so check.
assert(clear_space || (to()->is_empty() && from()->is_empty()), "Initialization of the survivor spaces assumes these are empty");
// A minimum eden size implies that there is a part of eden that
// is being used and that affects the initialization of any
// newly formed eden.
bool live_in_eden = minimum_eden_size > 0;
// If not clearing the spaces, do some checking to verify that
// the spaces are already mangled.
if (!clear_space) {
// Must check mangling before the spaces are reshaped. Otherwise,
// the bottom or end of one space may have moved into another and
// a failure of the check may not correctly indicate which space
// is not properly mangled.
if (ZapUnusedHeapArea) {
HeapWord* limit = (HeapWord*) _virtual_space.high();
eden()->check_mangled_unused_area(limit);
from()->check_mangled_unused_area(limit);
to()->check_mangled_unused_area(limit);
}
}
// Reset the spaces for their new regions.
eden()->initialize(edenMR,
clear_space && !live_in_eden,
SpaceDecorator::Mangle);
// If clear_space and live_in_eden, we will not have cleared any
// portion of eden above its top. This can cause newly
// expanded space not to be mangled if using ZapUnusedHeapArea.
// We explicitly do such mangling here.
if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
eden()->mangle_unused_area();
}
from()->initialize(fromMR, clear_space, mangle_space);
to()->initialize(toMR, clear_space, mangle_space);
// Set next compaction spaces.
eden()->set_next_compaction_space(from());
// The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion
// failure handling when to-space can contain live objects.
from()->set_next_compaction_space(NULL);
}
void DefNewGeneration::swap_spaces() {
ContiguousSpace* s = from();
_from_space = to();
_to_space = s;
eden()->set_next_compaction_space(from());
// The to-space is normally empty before a compaction so need
// not be considered. The exception is during promotion
// failure handling when to-space can contain live objects.
from()->set_next_compaction_space(NULL);
if (UsePerfData) {
CSpaceCounters* c = _from_counters;
_from_counters = _to_counters;
_to_counters = c;
}
}
bool DefNewGeneration::expand(size_t bytes) {
HeapWord* prev_high = (HeapWord*) _virtual_space.high();
bool success = _virtual_space.expand_by(bytes);
if (success && ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
// can be done here more simply than after the new
// spaces have been computed.
HeapWord* new_high = (HeapWord*) _virtual_space.high();
MemRegion mangle_region(prev_high, new_high);
SpaceMangler::mangle_region(mangle_region);
}
// Do not attempt an expand to the reserve size. The
// request should properly observe the maximum size of
// the generation so an expand-to-reserve should be
// unnecessary. Also a second expand-to-reserve call
// can potentially cause an undue expansion.
// For example, the first expand may fail for unknown reasons,
// but the second may succeed and expand the heap to its maximum
// value.
if (GCLocker::is_active()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead");
}
if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {
// 1. Check an overflow at 'new_size_candidate + thread_increase_size'.
if (new_size_candidate <= max_uintx - thread_increase_size) {
new_size_candidate += thread_increase_size;
// 2. Check an overflow at 'align_up'.
size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
if (new_size_candidate <= aligned_max) {
desired_new_size = align_up(new_size_candidate, alignment);
}
}
}
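// If either overflow guard above fails, desired_new_size keeps its initial
// value of new_size_before, i.e. the young generation size is left unchanged.
// Illustrative example (assumed values): with alignment = 64K, a candidate
// within 64K of max_uintx would overflow inside align_up(), so it is rejected.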
return desired_new_size;
}
void DefNewGeneration::compute_new_size() {
// This is called after a GC that includes the old generation, so from-space
// will normally be empty.
// Note that we check both spaces, since if scavenge failed they revert roles.
// If not we bail out (otherwise we would have to relocate the objects).
if (!from()->is_empty() || !to()->is_empty()) {
return;
}
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t old_size = gch->old_gen()->capacity();
size_t new_size_before = _virtual_space.committed_size();
size_t min_new_size = initial_size();
size_t max_new_size = reserved().byte_size();
assert(min_new_size <= new_size_before &&
new_size_before <= max_new_size, "just checking");
// All space sizes must be multiples of Generation::GenGrain.
size_t alignment = Generation::GenGrain;
int threads_count = Threads::number_of_non_daemon_threads();
size_t thread_increase_size = calculate_thread_increase_size(threads_count);
size_t new_size_candidate = old_size / NewRatio;
// Compute the desired new generation size based on NewRatio and NewSizeThreadIncrease,
// reverting to the previous value if any overflow happens.
size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
alignment, thread_increase_size);
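// Illustrative numbers (assumed, not guaranteed defaults): an old generation
// capacity of 512M with NewRatio = 2 gives a new_size_candidate of 256M;
// 100 non-daemon threads with NewSizeThreadIncrease = 16K would then add
// roughly 1.6M before alignment.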
bool changed = false;
if (desired_new_size > new_size_before) {
size_t change = desired_new_size - new_size_before;
assert(change % alignment == 0, "just checking");
if (expand(change)) {
changed = true;
}
// If the heap failed to expand to the desired size,
// "changed" will be false. If the expansion failed
// (and at this point it was expected to succeed),
// ignore the failure (leaving "changed" as false).
}
// Bail out of shrinking if there are objects in eden.
if (desired_new_size < new_size_before && eden()->is_empty()) {
size_t change = new_size_before - desired_new_size;
assert(change % alignment == 0, "just checking");
_virtual_space.shrink_by(change);
changed = true;
}
if (changed) {
// The spaces have already been mangled at this point but
// may not have been cleared (set top = bottom) and should be.
// Mangling was done when the heap was being expanded.
compute_space_boundaries(eden()->used(),
SpaceDecorator::Clear,
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
gch->rem_set()->resize_covered_region(cmr);
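// Descriptive note: this keeps the remembered set's covered region in sync
// with the young generation's committed bounds after a resize; the exact
// card-table behavior is defined by the rem_set implementation.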
// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
// If the Heap_lock is not locked by this thread, this will be called
// again later with the Heap_lock held.
bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
HeapWord* result = NULL;
if (do_alloc) {
result = from()->allocate(size);
}
HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
// We don't attempt to expand the young generation (but perhaps we should.)
return allocate(size, is_tlab);
}
void DefNewGeneration::adjust_desired_tenuring_threshold() {
// Set the desired survivor size to half the real survivor space
size_t const survivor_capacity = to()->capacity() / HeapWordSize;
size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
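// Worked example (illustrative, assuming a 64-bit VM where HeapWordSize == 8
// and the default TargetSurvivorRatio of 50): a 16M to-space holds 2M words,
// so desired_survivor_size would be about 1M words, i.e. 8M of the 16M.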
// If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
log_trace(gc)(":: Collection attempt not safe ::");
heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
return;
}
assert(to()->is_empty(), "Else not collection_attempt_is_safe");
// These can be shared for all code paths
IsAliveClosure is_alive(this);
ScanWeakRefClosure scan_weak_ref(this);
age_table()->clear();
to()->clear(SpaceDecorator::Mangle);
// The preserved marks should be empty at the start of the GC.
_preserved_marks_set.init(1);
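// Descriptive note: the preserved-marks set (initialized here for a single
// worker) records original mark words that get overwritten by forwarding
// pointers, so they can be restored if promotion fails.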
assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
// Verify that the usage of keep_alive didn't copy any objects.
assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
_string_dedup_requests.flush();
if (!_promotion_failed) {
// Swap the survivor spaces.
eden()->clear(SpaceDecorator::Mangle);
from()->clear(SpaceDecorator::Mangle);
if (ZapUnusedHeapArea) {
// This is now done here because of the piece-meal mangling which
// can check for valid mangling at intermediate points in the
// collection(s). When a young collection fails to collect
// sufficient space, resizing of the young generation can occur
// and redistribute the spaces in the young generation. Mangle
// here so that unzapped regions don't get distributed to
// other spaces.
to()->mangle_unused_area();
}
swap_spaces();
assert(to()->is_empty(), "to space should be empty now");
adjust_desired_tenuring_threshold();
// A successful scavenge should restart the GC time limit count which is
// for full GC's.
AdaptiveSizePolicy* size_policy = heap->size_policy();
size_policy->reset_gc_overhead_limit_count();
assert(!heap->incremental_collection_failed(), "Should be clear");
} else {
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
remove_forwarding_pointers();
log_info(gc, promotion)("Promotion failed");
// Add to-space to the list of spaces to compact
// when a promotion failure has occurred. In that
// case there can be live objects in to-space
// as a result of a partial evacuation of eden
// and from-space.
swap_spaces(); // For uniformity wrt ParNewGeneration.
from()->set_next_compaction_space(to());
heap->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_old_gen->promotion_failure_occurred();
_gc_tracer->report_promotion_failed(_promotion_failed_info);
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(heap->reset_promotion_should_fail();)
}
// We should have processed and cleared all the preserved marks.
_preserved_marks_set.reclaim();
// Will enter Full GC soon due to failed promotion. Must reset the mark word
// of objs in young-gen so that no objs are marked (forwarded) when Full GC
// starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
struct ResetForwardedMarkWord : ObjectClosure {
void do_object(oop obj) override {
if (obj->is_forwarded()) {
obj->init_mark();
}
}
} cl;
eden()->object_iterate(&cl);
from()->object_iterate(&cl);
// Increment age if obj still in new generation
obj->incr_age();
age_table()->add(obj, s);
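// Note: the per-age counts recorded here feed the tenuring-threshold
// calculation (see adjust_desired_tenuring_threshold() above), which decides
// how old an object must be before it is promoted on the next scavenge.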
}
// Done, insert forward pointer to obj in this header
old->forward_to(obj);
if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
// Record old; request adds a new weak reference, which reference
// processing expects to refer to a from-space object.
_string_dedup_requests.add(old);
}
return obj;
}
bool DefNewGeneration::no_allocs_since_save_marks() {
assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
return to()->saved_mark_at_top();
}
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
size_t max_alloc_words) {
if (requestor == this || _promotion_failed) {
return;
}
assert(GenCollectedHeap::heap()->is_old_gen(requestor), "We should not call our own generation");
/* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
if (to_space->top() > to_space->bottom()) {
trace("to_space not empty when contribute_scratch called");
}
*/
ContiguousSpace* to_space = to();
assert(to_space->end() >= to_space->top(), "pointers out of order");
size_t free_words = pointer_delta(to_space->end(), to_space->top());
if (free_words >= MinFreeScratchWords) {
ScratchBlock* sb = (ScratchBlock*)to_space->top();
sb->num_words = free_words;
sb->next = list;
list = sb;
}
}
void DefNewGeneration::reset_scratch() {
// If contributing scratch in to_space, mangle all of
// to_space if ZapUnusedHeapArea. This is needed because
// top is not maintained while using to-space as scratch.
if (ZapUnusedHeapArea) {
to()->mangle_unused_area_complete();
}
}
bool DefNewGeneration::collection_attempt_is_safe() {
if (!to()->is_empty()) {
log_trace(gc)(":: to is not empty ::"); returnfalse;
}
if (_old_gen == NULL) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
_old_gen = gch->old_gen();
}
return _old_gen->promotion_attempt_is_safe(used());
}
assert(!GCLocker::is_active(), "We should not be executing here");
// Check if the heap is approaching full after a collection has
// been done. Generally the young generation is empty at
// a minimum at the end of a collection. If it is not, then
// the heap is approaching full.
GenCollectedHeap* gch = GenCollectedHeap::heap();
if (full) {
DEBUG_ONLY(seen_incremental_collection_failed = false;)
if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
GCCause::to_string(gch->gc_cause()));
gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
set_should_allocate_from_space(); // we seem to be running out of space
} else {
log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
GCCause::to_string(gch->gc_cause()));
gch->clear_incremental_collection_failed(); // We just did a full collection
clear_should_allocate_from_space(); // if set
}
} else {
#ifdef ASSERT
// It is possible that incremental_collection_failed() == true
// here, because an attempted scavenge did not succeed. The policy
// is normally expected to cause a full collection which should
// clear that condition, so we should not be here twice in a row
// with incremental_collection_failed() == true without having done
// a full collection in between.
if (!seen_incremental_collection_failed &&
gch->incremental_collection_failed()) {
log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
GCCause::to_string(gch->gc_cause()));
seen_incremental_collection_failed = true;
} else if (seen_incremental_collection_failed) {
log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
GCCause::to_string(gch->gc_cause()));
seen_incremental_collection_failed = false;
}
#endif // ASSERT
}
if (ZapUnusedHeapArea) {
eden()->check_mangled_unused_area_complete();
from()->check_mangled_unused_area_complete();
to()->check_mangled_unused_area_complete();
}
// update the generation and space performance counters
update_counters();
gch->counters()->update_counters();
}
const char* DefNewGeneration::name() const {
return "def new generation";
}
// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
return eden();
}
HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
// This is the slow-path allocation for the DefNewGeneration.
// Most allocations are fast-path in compiled code.
// We try to allocate from the eden. If that works, we are happy.
// Note that since DefNewGeneration supports lock-free allocation, we
// have to use it here, as well.
HeapWord* result = eden()->par_allocate(word_size);
if (result == NULL) {
// If the eden is full and the last collection bailed out, we are running
// out of heap space, and we try to allocate the from-space, too.
// allocate_from_space can't be inlined because that would introduce a
// circular dependency at compile time.
result = allocate_from_space(word_size);
}
return result;
}