/* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
/*
 * In normal concurrent cycle, we have to pace the application to let GC finish.
 *
 * Here, we do not know how large would be the collection set, and what are the
 * relative performances of the each stage in the concurrent cycle, and so we have to
 * make some assumptions.
 *
 * For concurrent mark, there is no clear notion of progress. The moderately accurate
 * and easy to get metric is the amount of live objects the mark had encountered. But,
 * that does not directly correlate with the used heap, because the heap might be fully
 * dead or fully alive. We cannot assume either of the extremes: we would either allow
 * application to run out of memory if we assume heap is fully dead but it is not, and,
 * conversely, we would pacify application excessively if we assume heap is fully alive
 * but it is not. So we need to guesstimate the particular expected value for heap liveness.
 * The best way to do this is apparently recording the past history.
 *
 * For concurrent evac and update-refs, we are walking the heap per-region, and so the
 * notion of progress is clear: we get reported the "used" size from the processed regions
 * and use the global heap-used as the baseline.
 *
 * The allocatable space when GC is running is "free" at the start of phase, but the
 * accounted budget is based on "used". So, we need to adjust the tax knowing that.
 */
void ShenandoahPacer::setup_for_mark() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
size_t live = update_and_get_progress_history();
size_t free = _heap->free_set()->available();
double tax = 1.0 * live / taxable; // base tax for available free space
tax *= 1; // mark can succeed with immediate garbage, claim all available space
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
double tax = 1.0 * used / taxable; // base tax for available free space
tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free
tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
double tax = 1.0 * used / taxable; // base tax for available free space
tax *= 1; // update-refs is the last phase, claim the remaining free
tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
/*
 * In idle phase, we have to pace the application to let control thread react with GC start.
 *
 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges
 * it had seen recent allocations. It will naturally pace the allocations if control thread is
 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget
 * for applications to allocate at.
 */
void ShenandoahPacer::setup_for_idle() {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
// Pace an allocation of `words` heap words: claim the pacing budget, and if it
// is depleted, stall the allocating thread (bounded by ShenandoahPacingMaxDelay)
// until GC progress replenishes the budget.
void ShenandoahPacer::pace_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Fast path: try to allocate right away
  bool claimed = claim_for_alloc(words, false);
  if (claimed) {
    return;
  }

  // Forcefully claim the budget: it may go negative at this point, and
  // GC should replenish for this and subsequent allocations. After this claim,
  // we would wait a bit until our claim is matched by additional progress,
  // or the time budget depletes.
  claimed = claim_for_alloc(words, true);
  assert(claimed, "Should always succeed");

  // Threads that are attaching should not block at all: they are not
  // fully initialized yet. Blocking them would be awkward.
  // This is probably the path that allocates the thread oop itself.
  //
  // Thread which is not an active Java thread should also not block.
  // This can happen during VM init when main thread is still not an
  // active Java thread.
  JavaThread* current = JavaThread::current();
  if (current->is_attaching_via_jni() ||
      !current->is_active_Java_thread()) {
    return;
  }

  // NOTE(review): the declarations and updates of start/end/max_ms/total_ms were
  // missing in the damaged source (used undeclared below) — reconstructed;
  // confirm against upstream shenandoahPacer.cpp.
  double start = os::elapsedTime();

  size_t max_ms = ShenandoahPacingMaxDelay;
  size_t total_ms = 0;

  while (true) {
    // We could instead assist GC, but this would suffice for now.
    size_t cur_ms = (max_ms > total_ms) ? (max_ms - total_ms) : 1;
    wait(cur_ms);

    double end = os::elapsedTime();
    total_ms = (size_t)((end - start) * 1000);

    if (total_ms > max_ms || Atomic::load(&_budget) >= 0) {
      // Exiting if either:
      // a) Spent local time budget to wait for enough GC progress.
      //    Breaking out and allocating anyway, which may mean we outpace GC,
      //    and start Degenerated GC cycle.
      // b) The budget had been replenished, which means our claim is satisfied.
      ShenandoahThreadLocalData::add_paced_time(JavaThread::current(), end - start);
      break;
    }
  }
}
void ShenandoahPacer::wait(size_t time_ms) {
  // Perform timed wait. It works like sleep(), except without modifying
  // the thread interruptible status. MonitorLocker also checks for safepoints.
  assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
  // Cast to long below; value must fit, or the wait duration would be mangled.
  assert(time_ms <= LONG_MAX, "Sanity");
  MonitorLocker locker(_wait_monitor);
  _wait_monitor->wait((long)time_ms);
}
size_t threads_total = 0;
size_t threads_nz = 0; double sum = 0; for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { double d = ShenandoahThreadLocalData::paced_time(t); if (d > 0) {
threads_nz++;
sum += d;
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s",
d * 1000, total * 1000, d/total*100, t->name());
}
threads_total++;
ShenandoahThreadLocalData::reset_paced_time(t);
}
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ",
sum * 1000, total * 1000, sum/total*100);
if (threads_total > 0) {
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ",
sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100);
} if (threads_nz > 0) {
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ",
sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
}
out->cr();
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.2 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.