/* * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): this span appears garbled by extraction. Two classes seem to
// have been fused: G1YoungGCTraceTime (below) and, further down, the destructor
// of a G1YoungGCVerifierMark class whose declaration, fields (_type) and
// constructor are not visible here. The member declarations _pause_type,
// _pause_cause, _tt and the helper update_young_gc_name() referenced by the
// constructor are also missing from view. Code left byte-identical; confirm
// against the upstream file before touching.
// GCTraceTime wrapper that constructs the message according to GC pause type and // GC cause. // The code relies on the fact that GCTraceTimeWrapper stores the string passed // initially as a reference only, so that we can modify it as needed. class G1YoungGCTraceTime {
G1YoungCollector* _collector;
public:
G1YoungGCTraceTime(G1YoungCollector* collector, GCCause::Cause cause) :
_collector(collector), // Take snapshot of current pause type at start as it may be modified during gc. // The strings for all Concurrent Start pauses are the same, so the parameter // does not matter here.
_pause_type(_collector->collector_state()->young_gc_pause_type(false/* concurrent_operation_is_full_mark */)),
_pause_cause(cause), // Fake a "no cause" and manually add the correct string in update_young_gc_name() // to make the string look more natural.
_tt(update_young_gc_name(), NULL, GCCause::_no_gc, true) {
}
// NOTE(review): destructor name does not match the enclosing class — this body
// presumably belongs to a separate G1YoungGCVerifierMark class; verify upstream.
~G1YoungGCVerifierMark() { // Inject evacuation failure tag into type if needed.
G1HeapVerifier::G1VerifyType type = _type; if (_collector->evacuation_failed()) {
type = (G1HeapVerifier::G1VerifyType)(type | G1HeapVerifier::G1VerifyYoungEvacFail);
}
G1CollectedHeap::heap()->verify_after_young_collection(type);
}
};
void G1YoungCollector::wait_for_root_region_scanning() {
Ticks start = Ticks::now(); // We have to wait until the CM threads finish scanning the // root regions as it's the only way to ensure that all the // objects on them have been correctly scanned before we start // moving them during the GC. bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
Tickspan wait_time; if (waited) {
wait_time = (Ticks::now() - start);
}
phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
}
// Closure that prints each visited collection set region via the heap-region
// printer handed in at construction.
// NOTE(review): the closure's do_heap_region() override and the closing "};"
// are not visible in this chunk — presumably dropped during extraction; the
// class as shown is unterminated. Confirm against the upstream file.
class G1PrintCollectionSetClosure : public HeapRegionClosure { private:
G1HRPrinter* _hr_printer; public:
G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, doubletarget_pause_time_ms) { // Forget the current allocation region (we might even choose it to be part // of the collection set!) before finalizing the collection set.
allocator()->release_mutator_alloc_regions();
if (hr_printer()->is_active()) {
G1PrintCollectionSetClosure cl(hr_printer());
collection_set()->iterate(&cl);
collection_set()->iterate_optional(&cl);
}
}
// Worker task preparing regions for evacuation, with a nested per-region
// closure. Only the closure's data members are visible in this chunk.
// NOTE(review): the task's own members/interface and the closure's region
// callback are not in view — presumably dropped during extraction.
class G1PrepareEvacuationTask : public WorkerTask { class G1PrepareRegionsClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
G1PrepareEvacuationTask* _parent_task;
// Per-worker humongous region counts (total seen / eager-reclaim candidates).
uint _worker_humongous_total;
uint _worker_humongous_candidates;
// Card set memory statistics accumulated by sample_card_set_size() below.
G1MonotonicArenaMemoryStats _card_set_stats;
// Sample card set sizes for young gen and humongous regions before GC: this
// keeps the policy that gives memory back to the OS working with the most
// recent memory amounts for these regions.
void sample_card_set_size(HeapRegion* hr) {
  const bool should_sample = hr->is_young() || hr->is_starts_humongous();
  if (should_sample) {
    _card_set_stats.add(hr->rem_set()->card_set_memory_stats());
  }
}
// NOTE(review): orphan fragment — the enclosing function signature for these
// checks is not visible here (presumably a humongous-object eager-reclaim
// candidate test). "returnfalse;" on the next lines looks like a garbled
// "return false;" (fused tokens, a compile error as written); confirm against
// the upstream file. Code left byte-identical.
// Dead objects cannot be eager reclaim candidates. Due to class // unloading it is unsafe to query their classes so we return early. if (_g1h->is_obj_dead(obj, region)) { returnfalse;
}
// If we do not have a complete remembered set for the region, then we can // not be sure that we have all references to it. if (!region->rem_set()->is_complete()) { returnfalse;
} // Candidate selection must satisfy the following constraints // while concurrent marking is in progress: // // * In order to maintain SATB invariants, an object must not be // reclaimed if it was allocated before the start of marking and // has not had its references scanned. Such an object must have // its references (including type metadata) scanned to ensure no // live objects are missed by the marking process. Objects // allocated after the start of concurrent marking don't need to // be scanned. // // * An object must not be reclaimed if it is on the concurrent // mark stack. Objects allocated after the start of concurrent // marking are never pushed on the mark stack. // // Nominating only objects allocated after the start of concurrent // marking is sufficient to meet both constraints. This may miss // some objects that satisfy the constraints, but the marking data // structures don't support efficiently performing the needed // additional tests or scrubbing of the mark stack. // // However, we presently only nominate is_typeArray() objects. // A humongous object containing references induces remembered // set entries on other regions. In order to reclaim such an // object, those remembered sets would need to be cleaned up. // // We also treat is_typeArray() objects specially, allowing them // to be reclaimed even if allocated before the start of // concurrent mark. For this we rely on mark stack insertion to // exclude is_typeArray() objects, preventing reclaiming an object // that is in the mark stack. We also rely on the metadata for // such objects to be built-in and so ensured to be kept live. // Frequent allocation and drop of large binary blobs is an // important use case for eager reclaim, and this special handling // may reduce needed headroom.
// Pre-evacuation setup: flush per-thread state and start STW reference
// discovery before any evacuation work runs.
// NOTE(review): this span appears to fuse two functions. From the
// "Ticks start_processing" line onward the code references num_workers,
// per_thread_states, has_optional_evacuation_work and task_time, none of which
// are declared in pre_evacuate_collection_set — presumably spliced in from
// evacuate_initial_collection_set during extraction. The function also has no
// closing brace in view. Code left byte-identical; confirm upstream.
void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) { // Flush early, so later phases don't need to account for per-thread stuff. // Flushes deferred card marks, so must precede concatenating logs.
retire_tlabs();
// Flush early, so later phases don't need to account for per-thread stuff.
concatenate_dirty_card_logs_and_stats();
// Please see comment in g1CollectedHeap.hpp and // G1CollectedHeap::ref_processing_init() to see how // reference processing currently works in G1.
ref_processor_stw()->start_discovery(false/* always_clear */);
Ticks start_processing = Ticks::now();
{
G1RootProcessor root_processor(_g1h, num_workers);
G1EvacuateRegionsTask g1_par_task(_g1h,
per_thread_states,
task_queues(),
&root_processor,
num_workers,
has_optional_evacuation_work);
task_time = run_task_timed(&g1_par_task); // Closing the inner scope will execute the destructor for the // G1RootProcessor object. By subtracting the WorkerThreads task from the total // time of this scope, we get the "NMethod List Cleanup" time. This list is // constructed during "STW two-phase nmethod root processing", see more in // nmethod.hpp
}
Tickspan total_processing = Ticks::now() - start_processing;
// Run one round of optional-region evacuation with the active workers, and
// attribute the difference between wall time and task time to the
// "NMethod List Cleanup" phase (see evacuate_initial_collection_set()).
void G1YoungCollector::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  // To access the protected constructor/destructor of MarkScope.
  // NOTE(review): in the garbled original this declaration had been absorbed
  // into the trailing line comment, leaving G1MarkScope undeclared at its use
  // below; restored here as a function-local class. Confirm placement against
  // the upstream file (it may be declared at file scope there).
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;
  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, task_queues(), workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_initial_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_or_add_nmethod_list_cleanup_time((total_processing - task_time).seconds() * 1000.0);
}
// NOTE(review): orphan fragment — the enclosing function and loop are not
// visible here (the "break;" requires a surrounding loop; presumably from
// evacuate_optional_collection_set). time_left_ms is not declared in view.
// Code left byte-identical; confirm against the upstream file.
if (time_left_ms < 0 ||
!collection_set()->finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
collection_set()->optional_region_length(), time_left_ms); break;
}
// Non Copying Keep Alive closure class G1KeepAliveClosure: public OopClosure {
G1CollectedHeap*_g1h; public:
G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {} void do_oop(narrowOop* p) { guarantee(false, "Not needed"); } void do_oop(oop* p) {
oop obj = *p;
assert(obj != NULL, "the caller should have filtered out NULL values");
const G1HeapRegionAttr region_attr =_g1h->region_attr(obj); if (!region_attr.is_in_cset_or_humongous_candidate()) { return;
} if (region_attr.is_in_cset()) {
assert(obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
} else {
assert(!obj->is_forwarded(), "invariant" );
assert(region_attr.is_humongous_candidate(), "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
_g1h->set_humongous_is_live(obj);
}
}
};
// NOTE(review): only a fragment of this closure is visible — the do_oop /
// do_oop_work method signature enclosing the cset check below (and the
// declaration of obj and p it uses) is missing, presumably dropped during
// extraction. Code left byte-identical; confirm against the upstream file.
// Copying Keep Alive closure - can be called from both // serial and parallel code as long as different worker // threads utilize different G1ParScanThreadState instances // and different queues. class G1CopyingKeepAliveClosure: public OopClosure {
G1CollectedHeap* _g1h;
G1ParScanThreadState* _par_scan_state;
if (_g1h->is_in_cset_or_humongous_candidate(obj)) { // If the referent object has been forwarded (either copied // to a new location or to itself in the event of an // evacuation failure) then we need to update the reference // field and, if both reference and referent are in the G1 // heap, update the RSet for the referent. // // If the referent has not been forwarded then we have to keep // it alive by policy. Therefore we have copy the referent. // // When the queue is drained (after each phase of reference processing) // the object and it's followers will be copied, the reference field set // to point to the new location, and the RSet updated.
_par_scan_state->push_on_queue(ScannerTask(p));
}
}
};
// Proxy task for STW reference processing.
// NOTE(review): only the data members are visible here — the constructor,
// work() method and closing "};" are not in view, presumably dropped during
// extraction. Confirm against the upstream file.
class G1STWRefProcProxyTask : public RefProcProxyTask {
G1CollectedHeap& _g1h;
G1ParScanThreadStateSet& _pss;
TaskTerminator _terminator;
G1ScannerTasksQueueSet& _task_queues;
// NOTE(review): no constructor initializing _g1h/_pss (nor an access
// specifier) is visible for this class — presumably dropped during
// extraction. Code left byte-identical; confirm against the upstream file.
// Special closure for enqueuing discovered fields: during enqueue the card table // may not be in shape to properly handle normal barrier calls (e.g. card marks // in regions that failed evacuation, scribbling of various values by card table // scan code). Additionally the regular barrier enqueues into the "global" // DCQS, but during GC we need these to-be-refined entries in the GC local queue // so that after clearing the card table, the redirty cards phase will properly // mark all dirty cards to be picked up by refinement. class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
G1CollectedHeap* _g1h;
G1ParScanThreadState* _pss;
// Store the discovered value, then (for non-null values) run the GC-local
// post write barrier via the per-thread scan state.
void enqueue(HeapWord* discovered_field_addr, oop value) override {
assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr)); // Store the value first, whatever it is.
RawAccess<>::oop_store(discovered_field_addr, value); if (value == nullptr) { return;
}
_pss->write_ref_field_post(discovered_field_addr, value);
}
};
// Post-evacuation work; only the start of the function is visible here.
// NOTE(review): the function body is truncated in this chunk — no closing
// brace before the next definition begins; the remainder was presumably
// dropped during extraction. Code left byte-identical; confirm upstream.
void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
G1ParScanThreadStateSet* per_thread_states) {
G1GCPhaseTimes* p = phase_times();
// Process any discovered reference objects - we have // to do this _before_ we retire the GC alloc regions // as we may have to copy some 'reachable' referent // objects (and their reachable sub-graphs) that were // not copied during the pause.
process_discovered_references(per_thread_states);
// Main entry point for a young collection pause: sets up tracing, JFR,
// monitoring, heap printing, verification and worker threads, then runs the
// evacuation phases inside the timed scope.
// NOTE(review): this function is truncated in this chunk — it never closes
// before the trailing non-code text, and per_thread_states is used below
// without a visible declaration (presumably declared in dropped lines).
// Code left byte-identical; confirm against the upstream file.
void G1YoungCollector::collect() { // Do timing/tracing/statistics/pre- and post-logging/verification work not // directly related to the collection. They should not be accounted for in // collection work timing.
// The G1YoungGCTraceTime message depends on collector state, so must come after // determining collector state.
G1YoungGCTraceTime tm(this, _gc_cause);
// JFR
G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause); // JStat/MXBeans
G1YoungGCMonitoringScope ms(monitoring_support(),
collector_state()->in_mixed_phase() /* all_memory_pools_affected */); // Create the heap printer before internal pause timing to have // heap information printed as last part of detailed GC log.
G1HeapPrinterMark hpm(_g1h); // Young GC internal pause timing
G1YoungGCNotifyPauseMark npm(this);
// Verification may use the workers, so they must be set up before. // Individual parallel phases may override this.
set_young_collection_default_active_worker_threads();
// Wait for root region scan here to make sure that it is done before any // use of the STW workers to maximize cpu use (i.e. all cores are available // just to do that).
wait_for_root_region_scanning();
G1YoungGCVerifierMark vm(this);
{ // Actual collection work starts and is executed (only) in this scope.
// Young GC internal collection timing. The elapsed time recorded in the // policy for the collection deliberately elides verification (and some // other trivial setup above).
policy()->record_young_collection_start();
bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0; // Actually do the work...
// NOTE(review): per_thread_states is not declared in the lines visible here.
evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);
if (may_do_optional_evacuation) {
evacuate_optional_collection_set(&per_thread_states);
}
post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
// Refine the type of a concurrent mark operation now that we did the // evacuation, eventually aborting it.
_concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
// Need to report the collection pause now since record_collection_pause_end() // modifies it to the next state.
jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));
// NOTE(review): the following German disclaimer is website-extraction residue,
// not source code; preserved (translated to English) as a comment so the file
// remains syntactically valid:
// "The information on this web page was compiled carefully to the best of our
// knowledge. However, neither completeness, nor correctness, nor quality of
// the provided information is guaranteed.
// Note: the colored syntax rendering and the measurement are still
// experimental."