/* * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Compute the number of workers to use for full compaction, as the minimum of
// three limits: heap-waste, WorkerPolicy and number of used regions. The
// result is also installed as the active worker count of the work gang.
//
// NOTE(review): the extracted source was missing the final
// `return worker_count;` (undefined behavior for a uint-returning function)
// and had the tail of the G1FullCollector constructor (the _markers /
// _compaction_points initialization loop) fused into this function. Since
// this function runs from the constructor's initializer list, that residue
// could not legally execute here and has been dropped; the constructor must
// be restored separately.
uint G1FullCollector::calc_active_workers() {
  G1CollectedHeap* heap = G1CollectedHeap::heap();
  uint max_worker_count = heap->workers()->max_workers();
  // Only calculate number of workers if UseDynamicNumberOfGCThreads
  // is enabled, otherwise use max.
  if (!UseDynamicNumberOfGCThreads) {
    return max_worker_count;
  }

  // Consider G1HeapWastePercent to decide max number of workers. Each worker
  // will in average cause half a region waste.
  uint max_wasted_regions_allowed = ((heap->num_regions() * G1HeapWastePercent) / 100);
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2) , 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Finally consider the amount of used regions.
  uint used_worker_limit = heap->num_used_regions();
  assert(used_worker_limit > 0, "Should never have zero used regions.");

  // Update active workers to the lower of the limits.
  uint worker_count = MIN3(heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, "
                      "adaptive workers: %u, used limited workers: %u)",
                      worker_count, heap_waste_worker_limit, active_worker_limit, used_worker_limit);
  worker_count = heap->workers()->set_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

  return worker_count;
}
// Tear down the per-worker helper objects, then release the backing arrays
// that were allocated on the C heap by the constructor.
G1FullCollector::~G1FullCollector() {
  for (uint worker = 0; worker < _num_workers; ++worker) {
    delete _markers[worker];
    delete _compaction_points[worker];
  }
  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
  FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _live_stats);
}
// NOTE(review): this span is extraction-garbled and does not compile as
// written. The class header below (PrepareRegionsClosure) is fused with
// statement fragments that reference _heap and scope(), which belong to a
// G1FullCollector member function whose definition begins outside the
// visible span — presumably the abort-concurrent-cycle / verify / clear-
// bitmap prologue of the collection setup; confirm against the original
// file. Part of the fragment (the declaration of in_concurrent_cycle) is
// trapped inside a line comment, so the use below it is unresolved. Code is
// kept byte-identical pending manual reconstruction.
class PrepareRegionsClosure : public HeapRegionClosure {
G1FullCollector* _collector;
// Verification needs the bitmap, so we should clear the bitmap only later. bool in_concurrent_cycle = _heap->abort_concurrent_cycle();
_heap->verify_before_full_collection(scope()->is_explicit_gc()); if (in_concurrent_cycle) {
GCTraceTime(Debug, gc) debug("Clear Bitmap");
_heap->concurrent_mark()->clear_bitmap(_heap->workers());
}
void G1FullCollector::phase1_mark_live_objects() { // Recursively traverse all live objects and mark them.
GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());
{ // Do the actual marking.
G1FullGCMarkTask marking_task(this);
run_task(&marking_task);
}
{
uint old_active_mt_degree = reference_processor()->num_queues();
reference_processor()->set_active_mt_degree(workers());
GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", scope()->timer()); // Process reference objects found during marking.
ReferenceProcessorPhaseTimes pt(scope()->timer(), reference_processor()->max_num_queues());
G1FullGCRefProcProxyTask task(*this, reference_processor()->max_num_queues()); const ReferenceProcessorStats& stats = reference_processor()->process_discovered_references(task, pt);
scope()->tracer()->report_gc_reference_stats(stats);
pt.print_all_references();
assert(marker(0)->oop_stack()->is_empty(), "Should be no oops on the stack");
// Class unloading and cleanup. if (ClassUnloading) {
GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
CodeCache::UnloadingScope unloading_scope(&_is_alive); // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(scope()->timer());
_heap->complete_cleaning(purged_class);
}
// Try to avoid OOM immediately after Full GC in case there are no free regions // left after determining the result locations (i.e. this phase). Prepare to // maximally compact the tail regions of the compaction queues serially. if (!has_free_compaction_targets) {
phase2c_prepare_serial_compaction();
}
}
void G1FullCollector::phase2c_prepare_serial_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer()); // At this point we know that after parallel compaction there will be no // completely free regions. That means that the last region of // all compaction queues still have data in them. We try to compact // these regions in serial to avoid a premature OOM when the mutator wants // to allocate the first eden region after gc. for (uint i = 0; i < workers(); i++) {
G1FullGCCompactionPoint* cp = compaction_point(i); if (cp->has_regions()) {
serial_compaction_point()->add(cp->remove_last());
}
}
// Update the forwarding information for the regions in the serial // compaction point.
G1FullGCCompactionPoint* cp = serial_compaction_point(); for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
HeapRegion* current = *it; if (!cp->is_initialized()) { // Initialize the compaction point. Nothing more is needed for the first heap region // since it is already prepared for compaction.
cp->initialize(current);
} else {
assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
G1SerialRePrepareClosure re_prepare(cp, current);
set_compaction_top(current, current->bottom());
current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
}
}
cp->update();
}
void G1FullCollector::phase3_adjust_pointers() { // Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
G1FullGCAdjustTask task(this);
run_task(&task);
}
void G1FullCollector::phase4_do_compaction() { // Compact the heap using the compaction queues created in phase 2.
GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
G1FullGCCompactTask task(this);
run_task(&task);
// Serial compact to avoid OOM when very few free regions. if (serial_compaction_point()->has_regions()) {
task.serial_compaction();
}
}
void G1FullCollector::verify_after_marking() { if (!VerifyDuringGC || !_heap->verifier()->should_verify(G1HeapVerifier::G1VerifyFull)) { // Only do verification if VerifyDuringGC and G1VerifyFull is set. return;
}
#if COMPILER2_OR_JVMCI
DerivedPointerTableDeactivate dpt_deact; #endif
_heap->prepare_for_verify(); // Note: we can verify only the heap here. When an object is // marked, the previous value of the mark word (including // identity hash values, ages, etc) is preserved, and the mark // word is set to markWord::marked_value - effectively removing // any hash values from the mark word. These hash values are // used when verifying the dictionaries and so removing them // from the mark word can make verification of the dictionaries // fail. At the end of the GC, the original mark word values // (including hash values) are restored to the appropriate // objects.
GCTraceTime(Info, gc, verify) tm("Verifying During GC (full)");
_heap->verify(VerifyOption::G1UseFullMarking);
}
/*
 * NOTE(review): the trailing text below is not source code. It is residue
 * from the web page this file was extracted from (a German footer reporting
 * processing time and a content disclaimer). It is preserved inside this
 * comment so the file remains valid C++; it can be deleted outright.
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.15 Sekunden (vorverarbeitet)
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfaeltig
 * zusammengestellt. Es wird jedoch weder Vollstaendigkeit, noch Richtigkeit,
 * noch Qualitaet der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */