/* * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Apply the closure to each worker thread that has been created.
// Slots in _threads for workers that were never activated hold null and
// are skipped.  (Uses nullptr rather than NULL, per modern C++ idiom.)
void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
  for (uint i = 0; i < _max_num_threads; i++) {
    if (_threads[i] != nullptr) {
      tc->do_thread(_threads[i]);
    }
  }
}
// Request each created worker thread to stop.  Slots for workers that were
// never activated hold null and are skipped.  (Uses nullptr rather than
// NULL, per modern C++ idiom, consistent with worker_threads_do.)
void G1ConcurrentRefineThreadControl::stop() {
  for (uint i = 0; i < _max_num_threads; i++) {
    if (_threads[i] != nullptr) {
      _threads[i]->stop();
    }
  }
}
// Interval between periodic adjustments of the number of active refinement
// threads.  Instead of a fixed value, this could be a command line option.
// But then we might also want to allow configuration of
// adjust_threads_wait_ms().
uint64_t G1ConcurrentRefine::adjust_threads_period_ms() const {
  const uint64_t fixed_period_ms = 50;
  return fixed_period_ms;
}
// Lower bound for the pending cards target: one log buffer's worth of
// cards for each parallel GC thread.
static size_t minimum_pending_cards_target() {
  size_t cards_per_thread = G1UpdateBufferSize;
  return cards_per_thread * ParallelGCThreads;
}
update_pending_cards_target(logged_cards_time_ms,
processed_logged_cards,
predicted_thread_buffer_cards,
goal_ms); if (_thread_control.max_num_threads() == 0) { // If no refinement threads then the mutator threshold is the target.
_dcqs.set_mutator_refinement_threshold(_pending_cards_target);
} else { // Provisionally make the mutator threshold unlimited, to be updated by // the next periodic adjustment. Because card state may have changed // drastically, record that adjustment is needed and kick the primary // thread, in case it is waiting.
_dcqs.set_mutator_refinement_threshold(SIZE_MAX);
_needs_adjust = true; if (is_pending_cards_target_initialized()) {
_thread_control.activate(0);
}
}
}
// Wake up the primary thread less frequently when the time available until
// the next GC is longer.  But don't increase the wait time too rapidly.
// This reduces the number of primary thread wakeups that just immediately
// go back to waiting, while still being responsive to behavior changes.
static uint64_t compute_adjust_wait_time_ms(double available_ms) {
  // Fixed: the return keyword was fused with the cast ("returnstatic_cast"),
  // which does not compile.
  return static_cast<uint64_t>(sqrt(available_ms) * 4.0);
}
// How long the primary refinement thread should wait before the next
// thread-count adjustment.  Returns 0 ("wait forever, until explicitly
// activated") while the pending cards target is still uninitialized; this
// happens during startup, when we don't bother with refinement.
uint64_t G1ConcurrentRefine::adjust_threads_wait_ms() const {
  assert_current_thread_is_primary_refinement_thread();
  if (!is_pending_cards_target_initialized()) {
    // Target not yet initialized => wait forever (until explicitly
    // activated).
    return 0;
  }
  double available_ms = _threads_needed.predicted_time_until_next_gc_ms();
  uint64_t computed_wait_ms = compute_adjust_wait_time_ms(available_ms);
  // Never wait less than the periodic adjustment interval.
  return MAX2(computed_wait_ms, adjust_threads_period_ms());
}
class G1ConcurrentRefine::RemSetSamplingClosure : public HeapRegionClosure {
G1CollectionSet* _cset;
size_t _sampled_rs_length;
// Adjust the target length (in regions) of the young gen, based on the the // current length of the remembered sets. // // At the end of the GC G1 determines the length of the young gen based on // how much time the next GC can take, and when the next GC may occur // according to the MMU. // // The assumption is that a significant part of the GC is spent on scanning // the remembered sets (and many other components), so this thread constantly // reevaluates the prediction for the remembered set scanning costs, and potentially // resizes the young gen. This may do a premature GC or even increase the young // gen size to keep pause time length goal. void G1ConcurrentRefine::adjust_young_list_target_length() { if (_policy->use_adaptive_young_list_length()) {
G1CollectionSet* cset = G1CollectedHeap::heap()->collection_set();
RemSetSamplingClosure cl{cset};
cset->iterate(&cl);
_policy->revise_young_list_target_length(cl.sampled_rs_length());
}
}
// Check whether it's time to do a periodic adjustment. if (!_needs_adjust) {
Tickspan since_adjust = Ticks::now() - _last_adjust; if (since_adjust.milliseconds() >= adjust_threads_period_ms()) {
_needs_adjust = true;
}
}
// If needed, try to adjust threads wanted. if (_needs_adjust) { // Getting used young bytes requires holding Heap_lock. But we can't use // normal lock and block until available. Blocking on the lock could // deadlock with a GC VMOp that is holding the lock and requesting a // safepoint. Instead try to lock, and if fail then skip adjustment for // this iteration of the thread, do some refinement work, and retry the // adjustment later. if (Heap_lock->try_lock()) {
size_t used_bytes = _policy->estimate_used_young_bytes_locked();
Heap_lock->unlock();
adjust_young_list_target_length();
size_t young_bytes = _policy->young_list_target_length() * HeapRegion::GrainBytes;
size_t available_bytes = young_bytes - MIN2(young_bytes, used_bytes);
adjust_threads_wanted(available_bytes);
_needs_adjust = false;
_last_adjust = Ticks::now(); returntrue;
}
}
_threads_needed.update(old_wanted,
available_bytes,
num_cards,
_pending_cards_target);
uint new_wanted = _threads_needed.threads_needed(); if (new_wanted > _thread_control.max_num_threads()) { // If running all the threads can't reach goal, turn on refinement by // mutator threads. Using target as the threshold may be stronger // than required, but will do the most to get us under goal, and we'll // reevaluate with the next adjustment.
mutator_threshold = _pending_cards_target;
new_wanted = _thread_control.max_num_threads();
} elseif (is_in_last_adjustment_period()) { // If very little time remains until GC, enable mutator refinement. If // the target has been reached, this keeps the number of pending cards on // target even if refinement threads deactivate in the meantime. And if // the target hasn't been reached, this prevents things from getting // worse.
mutator_threshold = _pending_cards_target;
}
Atomic::store(&_threads_wanted, new_wanted);
_dcqs.set_mutator_refinement_threshold(mutator_threshold);
log_debug(gc, refine)("Concurrent refinement: wanted %u, cards: %zu, " "predicted: %zu, time: %1.2fms",
new_wanted,
num_cards,
_threads_needed.predicted_cards_at_next_gc(),
_threads_needed.predicted_time_until_next_gc_ms()); // Activate newly wanted threads. The current thread is the primary // refinement thread, so is already active. for (uint i = MAX2(old_wanted, 1u); i < new_wanted; ++i) { if (!_thread_control.activate(i)) { // Failed to allocate and activate thread. Stop trying to activate, and // instead use mutator threads to make up the gap.
Atomic::store(&_threads_wanted, i);
_dcqs.set_mutator_refinement_threshold(_pending_cards_target); break;
}
}
}
void G1ConcurrentRefine::reduce_threads_wanted() {
assert_current_thread_is_primary_refinement_thread(); if (!_needs_adjust) { // Defer if adjustment request is active.
uint wanted = Atomic::load(&_threads_wanted); if (wanted > 0) {
Atomic::store(&_threads_wanted, --wanted);
} // If very little time remains until GC, enable mutator refinement. If // the target has been reached, this keeps the number of pending cards on // target even as refinement threads deactivate in the meantime. if (is_in_last_adjustment_period()) {
_dcqs.set_mutator_refinement_threshold(_pending_cards_target);
}
}
}
// NOTE(review): the following German website disclaimer ("the information on
// this website was compiled to the best of our knowledge...") appears to be
// web-scrape residue unrelated to this HotSpot source file.  Preserved here
// as a comment so the file remains compilable — TODO: confirm and remove.
//
// Die Informationen auf dieser Webseite wurden
// nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
// noch Qualität der bereitgestellten Informationen zugesichert.
// Bemerkung:
// Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.