/* * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): fragment — the enclosing function's signature is not visible
// here; this reads like the tail of a bitmap-iteration closure operating on a
// G1CMTask (confirm against the original file).
// We move that task's local finger along.
_task->move_finger_to(addr);
_task->scan_task_entry(G1TaskQueueEntry::from_oop(cast_to_oop(addr))); // we only partially drain the local queue and global stack
// Partial drain (argument 'true') keeps queue/stack sizes bounded while
// iterating.
_task->drain_local_queue(true);
_task->drain_global_stack(true);
// if the has_aborted flag has been raised, we need to bail out of // the iteration return !_task->has_aborted();
// NOTE(review): the line above looks garbled — the statement
// "return !_task->has_aborted();" appears to have been folded into the
// comment text; confirm against the original sources.
}
// Resize the mark stack to new_capacity chunks. Only legal while the stack
// is empty; logs a warning and keeps the old stack if the new backing memory
// cannot be reserved.
// NOTE(review): this block looks corrupted — the allocation defining
// 'new_base' is missing, the 'if (_base != NULL)' guard on line below is
// folded into a comment, "returnfalse" is a garbled "return false", and the
// tail (from the log_debug onwards) appears to belong to
// G1CMMarkStack::initialize(). Confirm against the original file.
bool G1CMMarkStack::resize(size_t new_capacity) {
assert(is_empty(), "Only resize when stack is empty.");
assert(new_capacity <= _max_chunk_capacity, "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
// Reservation failed: warn and bail out without touching the old mapping.
if (new_base == NULL) {
log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk)); returnfalse;
} // Release old mapping. if (_base != NULL) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
}
log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT,
initial_chunk_capacity, _max_chunk_capacity);
return resize(initial_chunk_capacity);
}
void G1CMMarkStack::expand() { if (_chunk_capacity == _max_chunk_capacity) {
log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity); return;
}
size_t old_capacity = _chunk_capacity; // Double capacity if possible
size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
if (resize(new_capacity)) {
log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
old_capacity, new_capacity);
} else {
log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
old_capacity, new_capacity);
}
}
// Allocate a fresh chunk from the reserved chunk array (fragment).
// NOTE(review): truncated — only the high-water-mark bounds check is visible;
// the allocation itself is missing from this view.
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() { // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code. // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding // wraparound of _hwm. if (_hwm >= _chunk_capacity) { return NULL;
}
// Add the root MemRegion [start, end) to the set of regions to scan.
// Must be called at a safepoint; multiple GC workers may add concurrently,
// hence the atomic slot claim below.
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
  assert_at_safepoint();

  // Atomically claim the next free slot in _root_regions.
  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
  // Note: a space is required between the string literal and SIZE_FORMAT;
  // without it C++11 parses the macro name as a user-defined-literal suffix
  // and the assert fails to compile.
  assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
  assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end));
  _root_regions[idx].set_start(start);
  _root_regions[idx].set_end(end);
}
// Claim the next root MemRegion to scan, or NULL when aborting or when all
// regions have been claimed (fragment).
// NOTE(review): truncated — the code that actually claims and returns a
// region after the bounds check is missing from this view.
const MemRegion* G1CMRootMemRegions::claim_next() { if (_should_abort) { // If someone has set the should_abort flag, we return NULL to // force the caller to bail out of their loop. return NULL;
}
// Everything already handed out.
if (_claimed_root_regions >= _num_root_regions) { return NULL;
}
// G1ConcurrentMark constructor (fragment): sets up the global marking data
// structures, worker threads and the overflow mark stack.
// NOTE(review): the initializer list below is cut short — it is never closed
// and constructor-body statements follow directly; in addition, everything
// from the "Reset all tasks" comment onwards reads like the body of
// reset()/reset_marking_for_restart(), and several 'for'/'if' statements
// appear folded into comment text. Confirm against the original file.
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* bitmap_storage) : // _cm_thread set inside the constructor
_g1h(g1h),
_mark_bitmap(),
_heap(_g1h->reserved()),
_root_regions(_g1h->max_regions()),
_global_mark_stack(),
// _finger set in set_non_marking_state
_worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
_max_num_tasks(MAX2(ConcGCThreads, ParallelGCThreads)), // _num_active_tasks set in set_non_marking_state() // _tasks set inside the constructor
_concurrent_workers = new WorkerThreads("G1 Conc", _max_concurrent_workers);
_concurrent_workers->initialize_workers();
// The global mark stack must be usable up front; failure here is fatal.
if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack.");
}
// Reset all tasks, since different phases will use different number of active // threads. So, it's easiest to have all of them ready. for (uint i = 0; i < _max_num_tasks; ++i) {
_tasks[i]->reset(mark_bitmap());
}
// Clear per-region rebuild starts and marking statistics.
uint max_reserved_regions = _g1h->max_reserved_regions(); for (uint i = 0; i < max_reserved_regions; i++) {
_top_at_rebuild_starts[i] = NULL;
_region_mark_stats[i].clear();
}
// Expand the marking stack, if we have to and if we can. if (has_overflown()) {
_global_mark_stack.expand();
uint max_reserved_regions = _g1h->max_reserved_regions(); for (uint i = 0; i < max_reserved_regions; i++) {
_region_mark_stats[i].clear_during_overflow();
}
}
clear_has_overflown();
// Restart marking from the bottom of the heap with empty task queues.
_finger = _heap.start();
for (uint i = 0; i < _max_num_tasks; ++i) {
G1CMTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
}
}
// Record how many tasks/threads take part in the upcoming phase and size the
// termination protocol and both overflow barriers to wait for exactly that
// many workers.
void G1ConcurrentMark::set_concurrency(uint active_tasks) {
assert(active_tasks <= _max_num_tasks, "we should not have more");
_num_active_tasks = active_tasks; // Need to update the three data structures below according to the // number of active threads for this phase.
_terminator.reset_for_reuse(active_tasks);
_first_overflow_barrier_sync.set_n_workers((int) active_tasks);
_second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}
// NOTE(review): fragment — tail of a function whose signature is not visible
// (it reads like set_concurrency_and_phase(); confirm against the original).
if (!concurrent) { // At this point we should be in a STW phase, and completed marking.
assert_at_safepoint_on_vm_thread();
// In the STW case the global finger must have passed the end of the heap.
assert(out_of_regions(), "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
p2i(_finger), p2i(_heap.end()));
}
}
// Return the global marking state to its idle defaults once marking has
// completed; no tasks are active outside a marking cycle.
void G1ConcurrentMark::reset_at_marking_complete() { // We set the global marking state to some default values when we're // not doing marking.
reset_marking_for_restart();
_num_active_tasks = 0;
}
// The G1ConcurrentMark instance lives for the lifetime of the VM and is
// never destroyed; reaching this destructor is a bug. The frees above the
// guard are listed for completeness only.
G1ConcurrentMark::~G1ConcurrentMark() {
  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
  FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
  ShouldNotReachHere();
}
// G1ClearBitMapTask (fragment): worker task that clears the mark bitmap in
// M-sized chunks of heap.
// NOTE(review): large parts of this class are missing here (constructor, the
// do_heap_region body, work()); several statements below are garbled
// ("returntrue;"/"returnfalse;" are fused tokens, and some 'if' statements
// are folded into comment text). Treat as truncated extraction residue.
class G1ClearBitMapTask : public WorkerTask { public: static size_t chunk_size() { return M; }
private: // Heap region closure used for clearing the _mark_bitmap. class G1ClearBitmapHRClosure : public HeapRegionClosure { private:
G1ConcurrentMark* _cm;
G1CMBitMap* _bitmap; bool _suspendible; // If suspendible, do yield checks.
// Compute how far the bitmap of region r must be cleared; during a
// Concurrent Undo cycle only ranges that may contain marks need clearing.
HeapWord* region_clear_limit(HeapRegion* r) { // During a Concurrent Undo Mark cycle, the per region top_at_mark_start and // live_words data are current wrt to the _mark_bitmap. We use this information // to only clear ranges of the bitmap that require clearing. if (is_clear_concurrent_undo()) { // No need to clear bitmaps for empty regions (which includes regions we // did not mark through). if (_cm->live_words(r->hrm_index()) == 0) {
assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits"); return r->bottom();
}
assert(_bitmap->get_next_marked_addr(r->top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above tams");
} return r->end();
}
// Repeat the asserts from before the start of the closure. We will do them // as asserts here to minimize their overhead on the product. However, we // will have them as guarantees at the beginning / end of the bitmap // clearing to get some checking in the product.
assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
// Abort iteration if necessary. if (has_aborted()) { returntrue;
}
}
assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
r->reset_top_at_mark_start();
returnfalse;
}
};
G1ClearBitmapHRClosure _cl;
HeapRegionClaimer _hr_claimer; bool _suspendible; // If the task is suspendible, workers must join the STS.
// NOTE(review): the three lines below read like the tail of the function that
// runs this task (logging, run_task, completion guarantee), not class members.
log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
workers->run_task(&cl, num_workers);
guarantee(may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
}
// Prepare for the next marking cycle by clearing the mark bitmap. The
// guarantees before and after pin down the expected state: the concurrent
// mark thread is still in the current cycle, and no marking/rebuild work is
// in progress while clearing runs.
void G1ConcurrentMark::cleanup_for_next_mark() { // Make sure that the concurrent mark thread looks to still be in // the current cycle.
guarantee(cm_thread()->in_progress(), "invariant");
// We are finishing up the current cycle by clearing the next // marking bitmap and getting it ready for the next cycle. During // this time no other cycle can start. So, let's make sure that this // is the case.
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
// Second argument presumably enables yield checks for this concurrent
// clear (cf. the may_yield guarantee in the task runner above) — confirm.
clear_bitmap(_concurrent_workers, true);
// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}
// Clear the mark bitmap during a safepoint. The requesting (full) collection
// may run with fewer active workers than exist; temporarily activate every
// created worker so the clearing proceeds as fast as possible.
void G1ConcurrentMark::clear_bitmap(WorkerThreads* workers) {
  assert_at_safepoint_on_vm_thread();

  WithActiveWorkers with_all_created_workers(workers, workers->created_workers());
  clear_bitmap(workers, false);
}
// G1PreConcurrentStartTask (fragment): batched work performed before the
// concurrent start pause completes.
// NOTE(review): heavily truncated — only forward declarations, part of
// NoteStartOfMarkTask and loose statements from the concurrent-start and
// barrier-sync paths remain. "constdouble" below is a garbled "const double",
// and several statements are folded into comment text.
class G1PreConcurrentStartTask : public G1BatchedTask { // Reset marking state. class ResetMarkingStateTask; // For each region note start of marking. class NoteStartOfMarkTask;
class G1PreConcurrentStartTask::NoteStartOfMarkTask : public G1AbstractSubTask {
HeapRegionClaimer _claimer; public:
NoteStartOfMarkTask() : G1AbstractSubTask(G1GCPhaseTimes::NoteStartOfMark), _claimer(0) { }
double worker_cost() const override { // The work done per region is very small, therefore we choose this magic number to cap the number // of threads used when there are few regions. constdouble regions_per_thread = 1000; return _claimer.n_regions() / regions_per_thread;
}
SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); // This is the start of the marking cycle, we're expected all // threads to have SATB queues with active set to false.
satb_mq_set.set_active_all_threads(true, /* new active value */ false/* expected_active */);
_root_regions.prepare_for_scan();
// update_g1_committed() will be called at the end of an evac pause // when marking is on. So, it's also called at the end of the // concurrent start pause to update the heap end, if the heap expands // during it. No need to call it here.
}
/* * Notice that in the next two methods, we actually leave the STS * during the barrier sync and join it immediately afterwards. If we * do not do this, the following deadlock can occur: one thread could * be in the barrier sync code, waiting for the other thread to also * sync up, whereas another one could be trying to yield, while also * waiting for the other threads to sync up too. * * Note, however, that this code is also used during remark and in * this case we should not attempt to leave / enter the STS, otherwise * we'll either hit an assert (debug / fastdebug) or deadlock * (product). So we should only leave / enter the STS if we are * operating concurrently. * * Because the thread that does the sync barrier has left the STS, it * is possible to be suspended for a Full GC or an evacuation pause * could occur. This is actually safe, since the entering the sync * barrier is one of the last things do_marking_step() does, and it * doesn't manipulate any data structures afterwards.
*/
// at this point everyone should have synced up and not be doing any // more work
if (barrier_aborted) { // If the barrier aborted we ignore the overflow condition and // just abort the whole marking phase as quickly as possible. return;
}
}
// Decide how many marking workers to activate for the next concurrent phase.
// Returns a value in [1, _max_concurrent_workers].
uint G1ConcurrentMark::calc_active_marking_workers() {
  uint num_workers;
  if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ConcGCThreads)) {
    // Let the worker policy choose. No extra scaling by
    // scale_concurrent_workers() here because that scaling has already
    // gone into "_max_concurrent_workers".
    num_workers =
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
  } else {
    // Fixed count: either dynamic sizing is disabled or the user set
    // ConcGCThreads explicitly.
    num_workers = _max_concurrent_workers;
  }
  assert(num_workers > 0 && num_workers <= _max_concurrent_workers, "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
         _max_concurrent_workers, num_workers);
  return num_workers;
}
// Scan a single root MemRegion for a worker (fragment).
// NOTE(review): truncated — only the debug-only asserts are visible; the
// actual scanning loop is missing, and the '#ifdef ASSERT'/'#endif'
// directives have been fused onto code lines, which cannot preprocess as-is.
void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) { #ifdef ASSERT
HeapWord* last = region->last();
HeapRegion* hr = _g1h->heap_region_containing(last);
assert(hr->is_old() || hr->top_at_mark_start() == hr->bottom(), "Root regions must be old or survivor/eden but region %u is %s", hr->hrm_index(), hr->get_type_str());
assert(hr->top_at_mark_start() == region->start(), "MemRegion start should be equal to TAMS"); #endif
class G1CMRootRegionScanTask : public WorkerTask {
G1ConcurrentMark* _cm; public:
G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
WorkerTask("G1 Root Region Scan"), _cm(cm) { }
void work(uint worker_id) {
G1CMRootMemRegions* root_regions = _cm->root_regions(); const MemRegion* region = root_regions->claim_next(); while (region != NULL) {
_cm->scan_root_region(region, worker_id);
region = root_regions->claim_next();
}
}
};
// Run the root region scan phase on the concurrent workers, sized to at most
// one worker per root region.
void G1ConcurrentMark::scan_root_regions() {
  // scan_in_progress() is only set when there is at least one root region
  // to scan, so nothing to do otherwise.
  if (!root_regions()->scan_in_progress()) {
    return;
  }

  assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");

  // Work is distributed per region, so starting more threads than there
  // are regions is useless.
  _num_concurrent_workers = MIN2(calc_active_marking_workers(),
                                 root_regions()->num_root_regions());
  assert(_num_concurrent_workers <= _max_concurrent_workers, "Maximum number of marking threads exceeded");

  G1CMRootRegionScanTask task(this);
  log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                      task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
  _concurrent_workers->run_task(&task, _num_concurrent_workers);

  // has_aborted() may be true here without the survivor scan actually
  // having been aborted earlier; it is mainly used for sanity checking.
  root_regions()->scan_finished();
}
// NOTE(review): fragment — two unrelated tails fused together: the first part
// reads like the worker setup in G1ConcurrentMark::mark_from_roots(), the
// second like the tail of a verification helper using 'location'/'verifier'
// which are not declared in this view. Confirm against the original file.
// Setting active workers is not guaranteed since fewer // worker threads may currently exist and more may not be // available.
active_workers = _concurrent_workers->set_active_workers(active_workers);
log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->max_workers());
// Parallel task terminator is set in "set_concurrency_and_phase()"
set_concurrency_and_phase(active_workers, true/* concurrent */);
// Only check bitmap in Remark, and not at After-Verification because the regions // already have their TAMS'es reset. if (location != VerifyLocation::RemarkAfter) {
verifier->verify_bitmap_clear(true/* above_tams_only */);
}
}
}
// G1UpdateRemSetTrackingBeforeRebuildTask (fragment): selects regions whose
// remembered sets must be rebuilt and attributes humongous liveness data.
// NOTE(review): truncated — the outer task's constructor/work(), the
// add_marked_bytes_and_note_end helper and the inner closure's primary
// do_heap_region are missing; "virtualbool"/"returnfalse" below are garbled
// "virtual bool"/"return false", and some 'if' statements are folded into
// comment text.
class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
HeapRegionClaimer _hrclaimer;
uint volatile _total_selected_for_rebuild;
G1PrintRegionLivenessInfoClosure _cl;
class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
G1PrintRegionLivenessInfoClosure* _cl;
uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
// Distribute the given words across the humongous object starting with hr and // note end of marking. void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
uint const region_idx = hr->hrm_index();
size_t const obj_size_in_words = cast_to_oop(hr->bottom())->size();
uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
// "Distributing" zero words means that we only note end of marking for these // regions.
assert(marked_words == 0 || obj_size_in_words == marked_words, "Marked words should either be 0 or the same as humongous object (" SIZE_FORMAT ") but is " SIZE_FORMAT,
obj_size_in_words, marked_words);
// Hand out at most one region's worth (GrainWords) per spanned region.
for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
HeapRegion* const r = _g1h->region_at(i);
size_t const words_to_add = MIN2(HeapRegion::GrainWords, marked_words);
log_trace(gc, marking)("Adding " SIZE_FORMAT " words to humongous region %u (%s)",
words_to_add, i, r->get_type_str());
add_marked_bytes_and_note_end(r, words_to_add * HeapWordSize);
marked_words -= words_to_add;
}
assert(marked_words == 0,
SIZE_FORMAT " words left after distributing space across %u regions",
marked_words, num_regions_in_humongous);
}
// Record live bytes for region hr; humongous liveness is attributed to the
// starts-humongous region by marking and has to be spread over all regions
// the object spans.
void update_marked_bytes(HeapRegion* hr) {
uint const region_idx = hr->hrm_index();
size_t const marked_words = _cm->live_words(region_idx); // The marking attributes the object's size completely to the humongous starts // region. We need to distribute this value across the entire set of regions a // humongous object spans. if (hr->is_humongous()) {
assert(hr->is_starts_humongous() || marked_words == 0, "Should not have marked words " SIZE_FORMAT " in non-starts humongous region %u (%s)",
marked_words, region_idx, hr->get_type_str()); if (hr->is_starts_humongous()) {
distribute_marked_bytes(hr, marked_words);
}
} else {
log_trace(gc, marking)("Adding " SIZE_FORMAT " words to region %u (%s)", marked_words, region_idx, hr->get_type_str());
add_marked_bytes_and_note_end(hr, _cm->live_bytes(region_idx));
}
}
// NOTE(review): judging by its body this do_heap_region belongs to a
// different, after-rebuild closure; confirm against the original file.
virtualbool do_heap_region(HeapRegion* r) { // Update the remset tracking state from updating to complete // if remembered sets have been rebuilt.
_g1h->policy()->remset_tracker()->update_after_rebuild(r); returnfalse;
}
};
// NOTE(review): fragment — reads like the body of G1ConcurrentMark::remark();
// the signature and several declarations (e.g. num_workers) are missing, and
// "boolconst" below is a garbled "bool const".
// If a full collection has happened, we should not continue. However we might // have ended up here as the Remark VM operation has been scheduled already. if (has_aborted()) { return;
}
boolconst mark_finished = !has_overflown(); if (mark_finished) {
weak_refs_work();
SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); // We're done with marking. // This is the end of the marking cycle, we're expected all // threads to have SATB queues with active set to true.
satb_mq_set.set_active_all_threads(false, /* new active value */ true/* expected_active */);
// All marking completed. Check bitmap now as we will start to reset TAMSes // in parallel below so that we can not do this in the After-Remark verification.
_g1h->verifier()->verify_bitmap_clear(true/* above_tams_only */);
{
GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
_g1h->workers()->run_task(&cl, num_workers);
log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
_g1h->num_regions(), cl.total_selected_for_rebuild());
// G1ReclaimEmptyRegionsTask (fragment): per-region Cleanup-pause work that
// frees regions found completely empty after marking.
// NOTE(review): truncated — only the inner closure's field declarations and a
// later piece of the task's work() (updating the region sets and summary
// bytes) are visible here; 'cl' is not declared in this view.
class G1ReclaimEmptyRegionsTask : public WorkerTask { // Per-region work during the Cleanup pause. class G1ReclaimEmptyRegionsClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
size_t _freed_bytes;
FreeRegionList* _local_cleanup_list;
uint _old_regions_removed;
uint _archive_regions_removed;
uint _humongous_regions_removed;
// Now update the old/archive/humongous region sets
_g1h->remove_from_old_gen_sets(cl.old_regions_removed(),
cl.archive_regions_removed(),
cl.humongous_regions_removed());
{
// The rare-event lock guards the summary bytes update.
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_g1h->decrement_summary_bytes(cl.freed_bytes());
// Free regions found completely empty after marking and return them to the
// free list (fragment).
// NOTE(review): truncated — the code that actually populates
// empty_regions_list (running G1ReclaimEmptyRegionsTask on the workers) is
// missing between the declarations and the if below.
void G1ConcurrentMark::reclaim_empty_regions() {
WorkerThreads* workers = _g1h->workers();
FreeRegionList empty_regions_list("Empty Regions After Mark List");
if (!empty_regions_list.is_empty()) {
log_debug(gc)("Reclaimed %u empty regions", empty_regions_list.length()); // Now print the empty regions list.
_g1h->hr_printer()->cleanup(&empty_regions_list); // And actually make them available.
_g1h->prepend_to_freelist(&empty_regions_list);
}
}
// NOTE(review): fragment — tails of two post-cleanup paths fused together
// (heap size bookkeeping, then the remembered-set rebuild follow-up); the
// enclosing function signatures are not visible here.
// Cleanup will have freed any regions completely full of garbage. // Update the soft reference policy with the new heap occupancy.
Universe::heap()->update_capacity_and_used_at_gc();
// We reclaimed old regions so we should calculate the sizes to make // sure we update the old gen/space data.
_g1h->monitoring_support()->update_sizes();
}
if (needs_remembered_set_rebuild()) { // Update the remset tracking information as well as marking all regions // as fully parsable.
GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
G1UpdateRegionsAfterRebuild cl(_g1h);
_g1h->heap_region_iterate(&cl);
} else {
log_debug(gc, phases)("No Remembered Sets to update after rebuild");
}
// We need to make this be a "collection" so any collection pause that // races with it goes around and waits for Cleanup to finish.
_g1h->increment_total_collections();
// 'Keep Alive' oop closure used by both serial and parallel reference processing. // Uses the G1CMTask associated with a worker thread (for serial reference // processing the G1CMTask for worker 0 is used) to preserve (mark) and // trace referent objects. // // Using the G1CMTask and embedded local queues avoids having the worker // threads operating on the global mark stack. This reduces the risk // of overflowing the stack - which we would rather avoid at this late // state. Also using the tasks' local queues removes the potential // of the workers interfering with each other that could occur if // operating on the global stack.
// NOTE(review): the class below appears truncated — the do_oop(oop*)/
// do_oop(narrowOop*) overrides required by OopClosure are not visible here.
class G1CMKeepAliveAndDrainClosure : public OopClosure {
G1ConcurrentMark* _cm;
G1CMTask* _task;
uint _ref_counter_limit;
uint _ref_counter; bool _is_serial; public:
G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
_cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
_ref_counter(_ref_counter_limit), _is_serial(is_serial) {
assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
}
// Mark/push the referenced object; after every _ref_counter_limit
// references, drain the task's queues to bound their growth.
template <class T> void do_oop_work(T* p) { if (_cm->has_overflown()) { return;
} if (!_task->deal_with_reference(p)) { // We did not add anything to the mark bitmap (or mark stack), so there is // no point trying to drain it. return;
}
_ref_counter--;
if (_ref_counter == 0) { // We have dealt with _ref_counter_limit references, pushing them // and objects reachable from them on to the local stack (and // possibly the global stack). Call G1CMTask::do_marking_step() to // process these entries. // // We call G1CMTask::do_marking_step() in a loop, which we'll exit if // there's nothing more to do (i.e. we're done with the entries that // were pushed as a result of the G1CMTask::deal_with_reference() calls // above) or we overflow. // // Note: G1CMTask::do_marking_step() can set the G1CMTask::has_aborted() // flag while there may still be some work to do. (See the comment at // the beginning of G1CMTask::do_marking_step() for those conditions - // one of which is reaching the specified time target.) It is only // when G1CMTask::do_marking_step() returns without setting the // has_aborted() flag that the marking step has completed. do { double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
_task->do_marking_step(mark_step_duration_ms, false/* do_termination */,
_is_serial);
} while (_task->has_aborted() && !_cm->has_overflown());
_ref_counter = _ref_counter_limit;
}
}
};
// 'Drain' closure shared by serial and parallel reference processing. It
// invokes G1CMTask::do_marking_step() (for serial processing the G1CMTask of
// worker 0) with an effectively unbounded time budget so that the marking
// data structures are completely drained of the entries pushed by the
// 'keep alive' closure above.
class G1CMDrainMarkingStackClosure : public VoidClosure {
  G1ConcurrentMark* _cm;
  G1CMTask* _task;
  bool _is_serial;

public:
  G1CMDrainMarkingStackClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
    _cm(cm), _task(task), _is_serial(is_serial) {
    assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
  }

  void do_void() {
    do {
      // do_marking_step() may return with has_aborted() set even though
      // work remains (e.g. when it hits its time target); keep looping
      // until it completes normally or the global mark stack overflows.
      _task->do_marking_step(1000000000.0 /* something very large */, true/* do_termination */,
                             _is_serial);
    } while (_task->has_aborted() && !_cm->has_overflown());
  }
};
// Proxy task handing reference-processing work to the marking tasks
// (fragment: the constructor and work() are not visible here).
class G1CMRefProcProxyTask : public RefProcProxyTask {
G1CollectedHeap& _g1h;
G1ConcurrentMark& _cm;
void prepare_run_task_hook() override { // We need to reset the concurrency level before each // proxy task execution, so that the termination protocol // and overflow handling in G1CMTask::do_marking_step() knows // how many workers to wait for.
_cm.set_concurrency(_queue_count);
}
};
// NOTE(review): fragment — body of G1ConcurrentMark::weak_refs_work() with
// its signature and several statements missing; in particular the
// declaration of 'stats' (and the construction of 'task'/'pt') has been
// folded into the "Process the weak references" comment below.
ReferenceProcessor* rp = _g1h->ref_processor_cm();
// See the comment in G1CollectedHeap::ref_processing_init() // about how reference processing currently works in G1.
assert(_global_mark_stack.is_empty(), "mark stack should be empty");
// We need at least one active thread. If reference processing // is not multi-threaded we use the current (VMThread) thread, // otherwise we use the workers from the G1CollectedHeap and // we utilize all the worker threads we can.
uint active_workers = (ParallelRefProcEnabled ? _g1h->workers()->active_workers() : 1U);
active_workers = clamp(active_workers, 1u, _max_num_tasks);
// Set the degree of MT processing here. If the discovery was done MT, // the number of threads involved during discovery could differ from // the number of active workers. This is OK as long as the discovered // Reference lists are balanced (see balance_all_queues() and balance_queues()).
rp->set_active_mt_degree(active_workers);
// Process the weak references. const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
_gc_tracer_cm->report_gc_reference_stats(stats);
pt.print_all_references();
// The do_oop work routines of the keep_alive and drain_marking_stack // oop closures will set the has_overflown flag if we overflow the // global marking stack.
assert(has_overflown() || _global_mark_stack.is_empty(), "Mark stack should be empty (unless it has overflown)");
if (has_overflown()) { // We can not trust g1_is_alive and the contents of the heap if the marking stack // overflowed while processing references. Exit the VM.
fatal("Overflow during reference processing, can not continue. Current mark stack depth: "
SIZE_FORMAT ", MarkStackSize: " SIZE_FORMAT ", MarkStackSizeMax: " SIZE_FORMAT ". " "Please increase MarkStackSize and/or MarkStackSizeMax and restart.",
_global_mark_stack.size(), MarkStackSize, MarkStackSizeMax); return;
}
assert(_global_mark_stack.is_empty(), "Marking should have completed");
void G1ConcurrentMark::report_object_count(bool mark_completed) { // Depending on the completion of the marking liveness needs to be determined // using either the bitmap or after the cycle using the scrubbing information. if (mark_completed) {
G1ObjectCountIsAliveClosure is_alive(_g1h);
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
} else {
G1CMIsAliveClosure is_alive(_g1h);
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
}
}
// NOTE(review): fragment — the "class G1CMSATBBufferClosure ..." declaration
// on the next line has been folded into a comment, leaving the members below
// orphaned; the do_thread method further down uses members (_qset, _claim_token,
// _code_cl) that belong to a different, not-visible closure from the remark
// path. Confirm against the original file.
// Closure for marking entries in SATB buffers. class G1CMSATBBufferClosure : public SATBBufferClosure { private:
G1CMTask* _task;
G1CollectedHeap* _g1h;
// This is very similar to G1CMTask::deal_with_reference, but with // more relaxed requirements for the argument, so this must be more // circumspect about treating the argument as an object. void do_entry(void* entry) const {
_task->increment_refs_reached();
oop const obj = cast_to_oop(entry);
_task->make_reference_grey(obj);
}
void do_thread(Thread* thread) { if (thread->claim_threads_do(true, _claim_token)) { // Transfer any partial buffer to the qset for completed buffer processing.
_qset.flush_queue(G1ThreadLocalData::satb_mark_queue(thread)); if (thread->is_Java_thread()) { // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking // however the liveness of oops reachable from nmethods have very complex lifecycles: // * Alive if on the stack of an executing method // * Weakly reachable otherwise // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be // live by the SATB invariant but other oops recorded in nmethods may behave differently.
JavaThread::cast(thread)->nmethods_do(&_code_cl);
}
}
}
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.