/* * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
#if SHENANDOAH_OPTIMIZED_MARKTASK // The optimized ShenandoahMarkTask takes some bits away from the full object bits. // Fail if we ever attempt to address more than we can. if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n" "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n" "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
vm_exit_during_initialization("Fatal Error", buf);
} #endif
guarantee(bitmap_bytes_per_region != 0, "Bitmap bytes per region should not be zero");
guarantee(is_power_of_2(bitmap_bytes_per_region), "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
// Reserve aux bitmap for use in object_iterate(). We don't commit it here.
ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
_aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
_aux_bitmap_region_special = aux_bitmap.special();
_aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
// // Create regions and region sets //
size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());
ReservedSpace region_storage(region_storage_size, region_page_size);
MemTracker::record_virtual_memory_type(region_storage.base(), mtGC); if (!region_storage.special()) {
os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false, "Cannot commit region memory");
}
// Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks. // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there. // If not successful, bite a bullet and allocate at whatever address.
{
size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
uintptr_t min = round_up_power_of_2(cset_align);
uintptr_t max = (1u << 30u);
if (_collection_set == NULL) {
ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
}
_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
_free_set = new ShenandoahFreeSet(this, _num_regions);
{
ShenandoahHeapLocker locker(lock());
for (size_t i = 0; i < _num_regions; i++) {
HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i; bool is_committed = i < num_committed_regions; void* loc = region_storage.base() + i * region_align;
ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");
_marking_context->initialize_top_at_mark_start(r);
_regions[i] = r;
assert(!collection_set()->is_in(i), "New region should not be in collection set");
}
// Initialize to complete
_marking_context->mark_complete();
_free_set->rebuild();
}
if (AlwaysPreTouch) { // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, // before initialize() below zeroes it with initializing thread. For any given region, // we touch the region and the corresponding bitmaps from the same thread.
ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
#ifdef LINUX // UseTransparentHugePages would madvise that backing memory can be coalesced into huge // pages. But, the kernel needs to know that every small page is used, in order to coalesce // them into huge one. Therefore, we need to pretouch with smaller pages. if (UseTransparentHugePages) {
_pretouch_heap_page_size = (size_t)os::vm_page_size();
_pretouch_bitmap_page_size = (size_t)os::vm_page_size();
} #endif
// OS memory managers may want to coalesce back-to-back pages. Make their jobs // simpler by pre-touching continuous spaces (heap and bitmap) separately.
// There should probably be Shenandoah-specific options for these, // just as there are G1-specific options.
{
ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
}
_monitoring_support = new ShenandoahMonitoringSupport(this);
_phase_timings = new ShenandoahPhaseTimings(max_workers());
ShenandoahCodeRoots::initialize();
if (ShenandoahPacing) {
_pacer = new ShenandoahPacer(this);
_pacer->setup_for_idle();
} else {
_pacer = NULL;
}
_control_thread = new ShenandoahControlThread();
ShenandoahInitLogger::print();
return JNI_OK;
}
void ShenandoahHeap::initialize_mode() { if (ShenandoahGCMode != NULL) { if (strcmp(ShenandoahGCMode, "satb") == 0) {
_gc_mode = new ShenandoahSATBMode();
} elseif (strcmp(ShenandoahGCMode, "iu") == 0) {
_gc_mode = new ShenandoahIUMode();
} elseif (strcmp(ShenandoahGCMode, "passive") == 0) {
_gc_mode = new ShenandoahPassiveMode();
} else {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
}
} else {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
}
_gc_mode->initialize_flags(); if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
vm_exit_during_initialization(
err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
_gc_mode->name()));
} if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
vm_exit_during_initialization(
err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
_gc_mode->name()));
}
}
if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
vm_exit_during_initialization(
err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
_heuristics->name()));
} if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
vm_exit_during_initialization(
err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
_heuristics->name()));
}
}
#ifdef _MSC_VER #pragma warning( push ) #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif
// gclab can not be initialized early during VM startup, as it can not determinate its max_size. // Now, we will let WorkerThreads to initialize gclab when new worker is created.
_workers->set_initialize_gclab(); if (_safepoint_workers != NULL) {
_safepoint_workers->threads_do(&init_gclabs);
_safepoint_workers->set_initialize_gclab();
}
// Application allocates from the beginning of the heap, and GC allocates at // the end of it. It is more efficient to uncommit from the end, so that applications // could enjoy the near committed regions. GC allocations are much less frequent, // and therefore can accept the committing costs.
size_t count = 0; for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
ShenandoahHeapRegion* r = get_region(i - 1); if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
ShenandoahHeapLocker locker(lock()); if (r->is_empty_committed()) { if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) { break;
}
r->make_uncommitted();
count++;
}
}
SpinPause(); // allow allocators to take the lock
}
if (count > 0) {
control_thread()->notify_heap_changed();
}
}
// Slow path for GCLAB allocation: grows the per-thread GCLAB size heuristic,
// retires the current GCLAB, allocates a fresh one, and serves the request
// from it. Returns NULL to signal the caller to fall back to shared allocation.
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  // NOTE(review): the original was missing the new-buffer allocation, leaving
  // `gclab_buf` and `actual_size` undeclared; restored per upstream sources.
  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}
if (req.is_mutator_alloc()) { if (ShenandoahPacing) {
pacer()->pace_for_alloc(req.size());
pacer_epoch = pacer()->epoch();
}
if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
result = allocate_memory_under_lock(req, in_new_region);
}
// Allocation failed, block until control thread reacted, then retry allocation. // // It might happen that one of the threads requesting allocation would unblock // way later after GC happened, only to fail the second allocation, because // other threads have already depleted the free storage. In this case, a better // strategy is to try again, as long as GC makes progress. // // Then, we need to make sure the allocation was retried after at least one // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
size_t tries = 0;
while (result == NULL && _progress_last_gc.is_set()) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
}
while (result == NULL && tries <= ShenandoahFullGCThreshold) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
}
} else {
assert(req.is_gc_alloc(), "Can only accept GC allocs here");
result = allocate_memory_under_lock(req, in_new_region); // Do not call handle_alloc_failure() here, because we cannot block. // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
}
if (in_new_region) {
control_thread()->notify_heap_changed();
}
if (result != NULL) {
size_t requested = req.size();
size_t actual = req.actual_size();
if (req.is_mutator_alloc()) {
notify_mutator_alloc_words(actual, false);
// If we requested more than we were granted, give the rest back to pacer. // This only matters if we are in the same pacing epoch: do not try to unpace // over the budget for the other phase. if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
}
} else {
increase_used(actual*HeapWordSize);
}
}
// Inform metaspace OOM to GC heuristics if class unloading is possible. if (heuristics()->can_unload_classes()) {
ShenandoahHeuristics* h = heuristics();
h->record_metaspace_oom();
}
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); if (result != NULL) { return result;
}
// Start full GC
collect(GCCause::_metadata_GC_clear_soft_refs);
// Retry allocation
result = loader_data->metaspace_non_null()->allocate(size, mdtype); if (result != NULL) { return result;
}
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); if (result != NULL) { return result;
}
// Out of memory return NULL;
}
class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure { private:
ShenandoahHeap* const _heap;
Thread* const _thread; public:
ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
_heap(heap), _thread(Thread::current()) {}
for (size_t i = 0; i < num_regions(); i++) {
get_region(i)->print_on(st);
}
}
// Reclaims the whole humongous object spanning the regions that begin at
// `start`, turning each of them into immediate trash.
void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");
  assert(!start->has_live(), "liveness must be zero");

  // NOTE(review): `required_regions` and `index` were used but never declared
  // in the original; restored their computation per upstream sources.
  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index --);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}
class ShenandoahCheckCleanGCLABClosure : public ThreadClosure { public:
ShenandoahCheckCleanGCLABClosure() {} void do_thread(Thread* thread) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
}
};
// Retires each visited thread's GCLAB; optionally resets the per-thread
// GCLAB size heuristic so the next allocation re-learns the size.
// Fix: the original contained the fused token "boolconst", which does not compile.
class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};
void ShenandoahHeap::labs_make_parsable() {
assert(UseTLAB, "Only call with UseTLAB");
// Retires all Java threads' TLABs, optionally resizing them, then publishes
// the aggregated TLAB statistics.
// NOTE(review): the original was truncated after the loop (no stats.publish(),
// no closing brace); restored per upstream sources.
void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  // Paranoia: after TLAB retirement no GCLAB should need retirement either.
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}
void ShenandoahHeap::gclabs_retire(bool resize) {
assert(UseTLAB, "Only call with UseTLAB");
assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");
if (safepoint_workers() != NULL) {
safepoint_workers()->threads_do(&cl);
}
}
// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  }
  // Without elastic TLABs, cap by what the free set can currently hand out.
  return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
}
size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}
// Runs the Shenandoah verifier, but only at a Shenandoah safepoint and only
// when -XX:+ShenandoahVerify is enabled; otherwise does nothing.
void ShenandoahHeap::verify(VerifyOption vo) {
  if (!ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    return;
  }
  if (ShenandoahVerify) {
    verifier()->verify_generic(vo);
  } else {
    // TODO: Consider allocating verification bitmaps on demand,
    // and turn this on unconditionally.
  }
}
// TLAB capacity is whatever the free set currently offers.
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}
class ObjectIterateScanRootClosure : public BasicOopIterateClosure { private:
MarkBitMap* _bitmap;
ShenandoahScanObjectStack* _oop_stack;
ShenandoahHeap* const _heap;
ShenandoahMarkingContext* const _marking_context;
template <class T> void do_oop_work(T* p) {
T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) {
oop obj = CompressedOops::decode_not_null(o); if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) { // There may be dead oops in weak roots in concurrent root phase, do not touch them. return;
}
obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}
/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Drain the stack: report each object and push its references for traversal.
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}
// Commits (if needed) and clears the auxiliary marking bitmap used by
// object_iterate(). Returns false if the memory could not be committed.
// Fix: the original contained fused "returnfalse"/"returntrue" tokens,
// which do not compile.
bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}
// Seeds the iteration work stack with objects reachable from GC roots for
// the current GC cycle. It is important to relinquish the associated locks
// before diving into the heap dumper.
void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  uint n_workers = 1;
  if (safepoint_workers() != NULL) {
    n_workers = safepoint_workers()->active_workers();
  }
  ShenandoahHeapIterationRootScanner root_scanner(n_workers);
  root_scanner.roots_do(oops);
}
// Releases the auxiliary marking bitmap after object_iterate(), unless it
// lives in "special" (pre-committed) memory that must not be uncommitted.
void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (_aux_bitmap_region_special) {
    return;
  }
  if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}
// Closure for parallelly iterate objects class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure { private:
MarkBitMap* _bitmap;
ShenandoahObjToScanQueue* _queue;
ShenandoahHeap* const _heap;
ShenandoahMarkingContext* const _marking_context;
template <class T> void do_oop_work(T* p) {
T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) {
oop obj = CompressedOops::decode_not_null(o); if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) { // There may be dead oops in weak roots in concurrent root phase, do not touch them. return;
}
obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
private: // Divide global root_stack into worker queues bool prepare_worker_queues() {
_task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers); // Initialize queues for every workers for (uint i = 0; i < _num_workers; ++i) {
ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
_task_queues->register_queue(i, task_queue);
} // Divide roots among the workers. Assume that object referencing distribution // is related with root kind, use round-robin to make every worker have same chance // to process every kind of roots
size_t roots_num = _roots_stack.size(); if (roots_num == 0) { // No work to do returnfalse;
}
// Drains this worker's object-iteration queue: reports each object to `cl`
// and pushes its references for further traversal; steals from sibling
// queues when the local queue runs dry.
// NOTE(review): `t` (the scan task) and `oops` (the scan closure) are used
// below but not declared in this view — their declarations appear to have
// been lost from this excerpt; confirm against upstream before building.
void object_iterate_parallel(ObjectClosure* cl,
uint worker_id,
ShenandoahObjToScanQueueSet* queue_set) {
assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
assert(queue_set != NULL, "task queue must not be NULL");
// Each worker owns one queue in the set.
ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
assert(q != NULL, "object iterate queue must not be NULL");
// Work through the queue to traverse heap. // Steal when there is no task in queue. while (q->pop(t) || queue_set->steal(worker_id, t)) {
oop obj = t.obj();
assert(oopDesc::is_oop(obj), "must be a valid oop");
cl->do_object(obj);
obj->oop_iterate(&oops);
}
assert(q->is_empty(), "should be empty");
}
};
// Keep alive an object that was loaded with AS_NO_KEEPALIVE. void ShenandoahHeap::keep_alive(oop obj) { if (is_concurrent_mark_in_progress() && (obj != NULL)) {
ShenandoahBarrierSet::barrier_set()->enqueue(obj);
}
}
// Applies the closure to every heap region, in ascending index order.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t idx = 0; idx < num_regions(); idx++) {
    ShenandoahHeapRegion* const region = get_region(idx);
    blk->heap_region_do(region);
  }
}
class ShenandoahParallelHeapRegionTask : public WorkerTask { private:
ShenandoahHeap* const _heap;
ShenandoahHeapRegionClosure* const _blk;
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { private:
ShenandoahMarkingContext* const _ctx; public:
ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
void heap_region_do(ShenandoahHeapRegion* r) {
assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); if (r->is_active()) { // Check if region needs updating its TAMS. We have updated it already during concurrent // reset, so it is very likely we don't need to do another write here. if (_ctx->top_at_mark_start(r) != r->top()) {
_ctx->capture_top_at_mark_start(r);
}
} else {
assert(_ctx->top_at_mark_start(r) == r->top(), "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
}
}
bool is_thread_safe() { returntrue; }
};
class ShenandoahRendezvousClosure : public HandshakeClosure { public: inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {} inlinevoid do_thread(Thread* thread) {}
};
class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { private:
ShenandoahMarkingContext* const _ctx; public:
ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
void heap_region_do(ShenandoahHeapRegion* r) { if (r->is_active()) { // Reset live data and set TAMS optimistically. We would recheck these under the pause // anyway to capture any updates that happened since now.
r->clear_live_data();
_ctx->capture_top_at_mark_start(r);
}
}
void heap_region_do(ShenandoahHeapRegion* r) { if (r->is_active()) { // All allocations past TAMS are implicitly live, adjust the region data. // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
HeapWord *tams = _ctx->top_at_mark_start(r);
HeapWord *top = r->top(); if (top > tams) {
r->increase_live_data_alloc_words(pointer_delta(top, tams));
}
// We are about to select the collection set, make sure it knows about // current pinning status. Also, this allows trashing more regions that // now have their pinning status dropped. if (r->is_pinned()) { if (r->pin_count() == 0) {
ShenandoahHeapLocker locker(_lock);
r->make_unpinned();
}
} else { if (r->pin_count() > 0) {
ShenandoahHeapLocker locker(_lock);
r->make_pinned();
}
}
// Remember limit for updating refs. It's guaranteed that we get no // from-space-refs written from here on.
r->set_update_watermark_at_safepoint(r->top());
} else {
assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
assert(_ctx->top_at_mark_start(r) == r->top(), "Region " SIZE_FORMAT " should have correct TAMS", r->index());
}
}
void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
// Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to // make them parsable for update code to work correctly. Plus, we can compute new sizes // for future GCLABs here. if (UseTLAB) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
gclabs_retire(ResizeTLAB);
}
// Atomically transitions _cancelled_gc from CANCELLABLE to CANCELLED.
// Returns true if this thread won the cancellation race, false if GC was
// already cancelled. While the state is NOT_CANCELLED, spins — providing a
// safepoint opportunity for Java threads to avoid deadlocking a pending SP.
// Fix: the original contained fused "returntrue"/"returnfalse"/"elseif"
// tokens, which do not compile.
bool ShenandoahHeap::try_cancel_gc() {
  while (true) {
    jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
    if (prev == CANCELLABLE) return true;
    else if (prev == CANCELLED) return false;
    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
    Thread* thread = Thread::current();
    if (thread->is_Java_thread()) {
      // We need to provide a safepoint here, otherwise we might
      // spin forever if a SP is pending.
      ThreadBlockInVM sp(JavaThread::cast(thread));
      SpinPause();
    }
  }
}
void ShenandoahHeap::stop() { // The shutdown sequence should be able to terminate when GC is running.
// Step 0. Notify policy to disable event recording.
_shenandoah_policy->record_shutdown();
// Step 1. Notify control thread that we are in shutdown. // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
control_thread()->prepare_for_graceful_shutdown();
// Step 2. Notify GC workers that we are cancelling GC.
cancel_gc(GCCause::_shenandoah_stop_vm);
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs), // so they should not have forwarded oops. // However, we do need to "null" dead oops in the roots, if can not be done // in concurrent cycles. void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
uint num_workers = _workers->active_workers();
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.