/* * Copyright (c) 2017, 2022, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Report the upper bound for a TLAB, converted from heap words to bytes.
  // The allocation path figures out the actual TLAB size on its own;
  // this only caps what it may ask for.
  const size_t max_bytes = _max_tlab_size * HeapWordSize;
  return max_bytes;
}
// Allocate 'size' heap words from the contiguous space, expanding the
// committed heap on demand. Returns NULL only when the heap cannot be
// expanded any further. When 'verbose' is set, heap occupancy counters
// and the occupancy/metaspace log lines are refreshed (rate-limited).
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = NULL;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != NULL) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != NULL) {
        break;
      }

      // Expand and loop back if space is available
      size_t space_left = max_capacity() - capacity();
      size_t want_space = MAX2(size, EpsilonMinHeapExpand);

      if (want_space < space_left) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size < space_left) {  // was "elseif": not valid C++
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(space_left);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return NULL;
      }

      // NOTE(review): restored from upstream — publish the newly committed
      // range to the space, otherwise the retry above could never succeed.
      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  // NOTE(review): restored from upstream — sample occupancy once; both
  // rate-limited notification paths below key off this value.
  size_t used = _space->used();

  // Allocation successful, update counters
  if (verbose) {
    size_t last = _last_counter_update;
    // CAS guarantees only one thread per _step_counter_update window pays
    // the cost of updating the monitoring counters.
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  if (verbose) {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}
// NOTE(review): the extraction lost this function's header, the 'thread'
// initialization, the size clamping, and the epilogue — the visible body
// referenced 'thread', 'requested_size', and 'actual_size' without any
// declaration and never closed its scope. Restored to the upstream Epsilon
// signature; confirm against the class declaration in the header.
//
// Allocate a new TLAB of at least 'min_size' heap words. With elastic TLABs
// enabled, sizes grow geometrically per thread and decay after inactivity.
// On success, stores the granted size into '*actual_size'.
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // NOTE(review): restored from upstream — without these, 'min_size' would
  // not be honored and the size could violate object alignment.
  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}
HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Metaspace is not initialized at this point, so the verbose path
  // (which calls print_metaspace_info) must stay disabled.
  const bool verbose = false;
  return allocate_work(size, verbose);
}
void EpsilonHeap::collect(GCCause::Cause cause) {
  // The two metadata causes mean the VM itself entered the safepoint for
  // metadata collection. Epsilon does no GC, but it still has to perform
  // sizing adjustments, otherwise the VM would re-enter the same safepoint
  // again very soon.
  const bool metadata_cause = (cause == GCCause::_metadata_GC_threshold) ||
                              (cause == GCCause::_metadata_GC_clear_soft_refs);

  if (metadata_cause) {
    assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
    log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
    MetaspaceGC::compute_new_size();
    print_metaspace_info();
  } else {
    log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }

  _monitoring_support->update_counters();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.