/* * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): fragment — the enclosing function signature was lost during
// extraction. The shape (drain local stacks, then loop stealing depth-first
// tasks until termination is offered) matches a GC worker steal_work routine
// taking a TaskTerminator& `terminator` and a uint `worker_id`; confirm
// against the upstream file. Brace balance below is also suspect (extra
// closing braces at the end) — verify before compiling.
PSPromotionManager* pm =
PSPromotionManager::gc_thread_promotion_manager(worker_id);
// Fully drain this worker's local marking/promotion stacks first.
pm->drain_stacks(true);
guarantee(pm->stacks_empty(), "stacks should be empty at this point");
// Work-stealing loop: take tasks from other workers' depth-first queues,
// process each, re-drain, and exit only when the terminator agrees all
// workers are done.
while (true) {
ScannerTask task; if (PSPromotionManager::steal_depth(worker_id, task)) {
TASKQUEUE_STATS_ONLY(pm->record_steal(task));
pm->process_popped_location_depth(task);
pm->drain_stacks_depth(true);
} else { if (terminator.offer_termination()) { break;
}
}
}
guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}
// Define before use class PSIsAliveClosure: public BoolObjectClosure { public: bool do_object_b(oop p) { return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
}
};
PSIsAliveClosure PSScavenge::_is_alive_closure;
// NOTE(review): corrupted fragment. do_oop_work()'s body is truncated after
// the ASSERT-only checks (the actual keep-alive/forwarding work is missing),
// and the `do_void()` method spliced in below references `_terminator` and
// `_worker_id`, which this class does not declare — it most likely belongs
// to a different closure (a drain/evacuate-followers task). Also note the
// fused token `virtualvoid` (should be `virtual void`). Restore from the
// upstream file rather than patching in place.
class PSKeepAliveClosure: public OopClosure { protected:
MutableSpace* _to_space;
PSPromotionManager* _promotion_manager;
// Debug-only sanity checks: the referent must be a valid oop living in
// young-gen from-space (not yet copied to to-space).
template <class T> void do_oop_work(T* p) { #ifdef ASSERT // Referent must be non-null and in from-space
oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
assert(oopDesc::is_oop(obj), "referent must be an oop");
assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space"); #endif
// NOTE(review): fused token — `virtualvoid` must be `virtual void`.
virtualvoid do_void() {
assert(_promotion_manager != nullptr, "Sanity");
// Drain this worker's stacks, then steal from peers until termination.
_promotion_manager->drain_stacks(true);
guarantee(_promotion_manager->stacks_empty(), "stacks should be empty at this point");
if (_terminator != nullptr) {
steal_work(*_terminator, _worker_id);
}
}
};
// NOTE(review): three unrelated fragments spliced together by the extraction:
// (1) the opening of class ParallelScavengeRefProcProxyTask (body truncated),
// (2) the header comment and assertions of PSScavenge::invoke(), and
// (3) the body of a parallel root-scanning worker task (it references
// `worker_id`, `_is_old_gen_empty`, `_active_workers`, which invoke() does
// not have). Restore each piece from the upstream file.
class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
TaskTerminator _terminator;
// This method contains all heap specific policy for invoking scavenge. // PSScavenge::invoke_no_policy() will do nothing but attempt to // scavenge. It will not clean up after failed promotions, bail out if // we've exceeded policy time limits, or any other special behavior. // All such policy should be placed here. // // Note that this method should only be called from the vm_thread while // at a safepoint! bool PSScavenge::invoke() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
// Worker-task fragment begins here: scan old-to-young card table entries,
// then strong OopStorage roots, draining promotion stacks as we go.
if (!_is_old_gen_empty) { // There are only old-to-young pointers if there are objects // in the old gen.
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
// Scavenge OopStorages
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
PSScavengeRootsClosure closure(pm);
_oop_storage_strong_par_state.oops_do(&closure); // Do the real work
pm->drain_stacks(false);
}
// If active_workers can exceed 1, add a steal_work(). // PSPromotionManager::drain_stacks_depth() does not fully drain its // stacks and expects a steal_work() to complete the draining if // ParallelGCThreads is > 1.
if (_active_workers > 1) {
steal_work(_terminator, worker_id);
}
}
};
// NOTE(review): fragment of PSScavenge::invoke_no_policy() with large spans
// missing (the actual scavenge phases between the preamble and the
// post-collection sizing code are gone). Also note two fused tokens —
// `returnfalse` on the GCLocker bail-out and on the to-space-not-empty
// bail-out — which must read `return false;`. Restore from upstream.
// This method contains no policy. You should probably // be calling invoke() instead. bool PSScavenge::invoke_no_policy() {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
_gc_timer.register_gc_start();
// Bail out (no GC) if a JNI critical section is active.
// NOTE(review): fused token — `returnfalse` must be `return false;`.
if (GCLocker::check_active_before_gc()) { returnfalse;
}
if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) { // Gather the feedback data for eden occupancy.
young_gen->eden_space()->accumulate_statistics();
}
// Let the size policy know we're done. Note that we count promotion // failure cleanup time as part of the collection (otherwise, we're // implicitly saying it's mutator time).
size_policy->minor_collection_end(gc_cause);
// On success: eden and from-space are now empty; flip from/to spaces.
if (!promotion_failure_occurred) { // Swap the survivor spaces.
young_gen->eden_space()->clear(SpaceDecorator::Mangle);
young_gen->from_space()->clear(SpaceDecorator::Mangle);
young_gen->swap_spaces();
// A successful scavenge should restart the GC time limit count which is // for full GC's.
size_policy->reset_gc_overhead_limit_count(); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold
// Deciding a free ratio in the young generation is tricky, so if // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating // that the old generation size may have been limited because of them) we // should then limit our young generation size using NewRatio to have it // follow the old generation size. if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
young_gen->max_gen_size());
}
if (UsePerfData) {
PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
counters->update_tenuring_threshold(_tenuring_threshold);
counters->update_survivor_size_counters();
}
// Do call at minor collections? // Don't check if the size_policy is ready at this // level. Let the size_policy check that internally. if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) { // Calculate optimal free space amounts
// Survivor spaces must fit inside the young gen alongside eden.
assert(young_gen->max_gen_size() >
young_gen->from_space()->capacity_in_bytes() +
young_gen->to_space()->capacity_in_bytes(), "Sizes of space in young gen are out-of-bounds");
// Used for diagnostics
size_policy->clear_generation_free_space_flags();
size_policy->compute_eden_space_size(young_live,
eden_live,
cur_eden,
max_eden_size, false/* not full gc*/);
size_policy->check_gc_overhead_limit(eden_live,
max_old_gen_size,
max_eden_size, false/* not full gc*/,
gc_cause,
heap->soft_ref_policy());
size_policy->decay_supplemental_growth(false/* not full gc*/);
} // Resize the young generation at every collection // even if new sizes have not been calculated. This is // to allow resizes that may have been inhibited by the // relative location of the "to" and "from" spaces.
// NOTE(review): a call that resizes the young gen appears to be missing
// between these comments — verify against upstream.
// Resizing the old gen at young collections can cause increases // that don't feed back to the generation sizing policy until // a full collection. Don't resize the old gen here.
// Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can // cause the change of the heap layout. Make sure eden is reshaped if that's the case. // Also update() will case adaptive NUMA chunk resizing.
assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
young_gen->eden_space()->update();
heap->gc_policy_counters()->update_counters();
heap->resize_all_tlabs();
assert(young_gen->to_space()->is_empty(), "to space should be empty now");
}
// Verify all old -> young cards are now precise if (VerifyRememberedSets) { // Precise verification will give false positives. Until this is fixed, // use imprecise verification. // heap->card_table()->verify_all_young_refs_precise();
heap->card_table()->verify_all_young_refs_imprecise();
}
if (log_is_enabled(Debug, gc, heap, exit)) {
accumulated_time()->stop();
}
// Do not attempt to promote unless to_space is empty if (!young_gen->to_space()->is_empty()) { if (UsePerfData) {
counters->update_scavenge_skipped(to_space_not_empty);
// NOTE(review): fused token — `returnfalse` must be `return false;`.
} returnfalse;
}
// Test to see if the scavenge will likely fail.
PSAdaptiveSizePolicy* policy = heap->size_policy();
// A similar test is done in the policy's should_full_GC(). If this is // changed, decide if that test should also be changed.
size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
// Scavenge only proceeds when old gen can absorb the estimated promotion.
size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes()); bool result = promotion_estimate < old_gen->free_in_bytes();
log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
(size_t) policy->padded_average_promoted_in_bytes(),
old_gen->free_in_bytes()); if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
}
if (!result) { if (UsePerfData) {
counters->update_scavenge_skipped(promoted_too_large);
}
} return result;
}
// NOTE(review): fragment — the function's closing brace (and likely further
// setup code) is cut off by non-source text that follows; restore the tail
// from upstream.
void PSScavenge::initialize() { // Arguments must have been parsed
// AlwaysTenure/NeverTenure pin the tenuring threshold to the value the
// argument parser already forced into MaxTenuringThreshold.
if (AlwaysTenure || NeverTenure) {
assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1, "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
_tenuring_threshold = MaxTenuringThreshold;
} else { // We want to smooth out our startup times for the AdaptiveSizePolicy
_tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
MaxTenuringThreshold;
}
// Set boundary between young_gen and old_gen
assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), "old above young");
set_young_generation_boundary(young_gen->eden_space()->bottom());
/* NOTE(review): extraction residue — a German website disclaimer, not part of
 * the source. Preserved (as a comment, so the file remains compilable) with
 * English translation:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting is still experimental."
 */