/* * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
  PtrQueue(qset),
  // SATB queues are only active during marking cycles.  We create them
  // with their active field set to false.  If a thread is created
  // during a cycle, its SATB queue needs to be activated before the
  // thread starts running.  This is handled by the collector-specific
  // BarrierSet thread attachment protocol.
  _active(false)
{ }
// _count_and_process_flag has flag in least significant bit, count in // remaining bits. _process_completed_buffers_threshold is scaled // accordingly, with the lsbit set, so a _count_and_process_flag value // is directly comparable with the recorded threshold value. The // process flag is set whenever the count exceeds the threshold, and // remains set until the count is reduced to zero.
// Increment count. If count > threshold, set flag, else maintain flag. staticvoid increment_count(volatile size_t* cfptr, size_t threshold) {
size_t old;
size_t value = Atomic::load(cfptr); do {
old = value;
value += 2;
assert(value > old, "overflow"); if (value > threshold) value |= 1;
value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}
// Decrement count. If count == 0, clear flag, else maintain flag. staticvoid decrement_count(volatile size_t* cfptr) {
size_t old;
size_t value = Atomic::load(cfptr); do {
assert((value >> 1) != 0, "underflow");
old = value;
value -= 2; if (value <= 1) value = 0;
value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}
void SATBMarkQueueSet::set_process_completed_buffers_threshold(size_t value) {
  // Scale the requested threshold to align with the count field of
  // _count_and_process_flag.  If scaling overflows, saturate to the
  // maximum value.  Setting the process-flag bit makes the comparison
  // in increment_count exact.
  size_t scaled = value << 1;
  if ((scaled >> 1) != value) { // Round-trip failed => shift overflowed.
    scaled = SIZE_MAX;
  }
  _process_completed_buffers_threshold = scaled | 1;
}
void SATBMarkQueueSet::verify_active_states(bool expected_active) { // Verify queue set state if (is_active() != expected_active) {
dump_active_states(expected_active);
fatal("SATB queue set has an unexpected active state");
}
// Verify thread queue states class VerifyThreadStatesClosure : public ThreadClosure {
SATBMarkQueueSet* _qset; bool _expected_active; public:
VerifyThreadStatesClosure(SATBMarkQueueSet* qset, bool expected_active) :
_qset(qset), _expected_active(expected_active) {} virtualvoid do_thread(Thread* t) { if (_qset->satb_queue_for_thread(t).is_active() != _expected_active) {
_qset->dump_active_states(_expected_active);
fatal("Thread SATB queue has an unexpected active state");
}
}
} closure(this, expected_active);
Threads::threads_do(&closure);
} #endif// ASSERT
void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); #ifdef ASSERT
verify_active_states(expected_active); #endif// ASSERT // Update the global state, synchronized with threads list management.
{
MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
_all_active = active;
}
class SetThreadActiveClosure : public ThreadClosure {
SATBMarkQueueSet* _qset; bool _active; public:
SetThreadActiveClosure(SATBMarkQueueSet* qset, bool active) :
_qset(qset), _active(active) {} virtualvoid do_thread(Thread* t) {
SATBMarkQueue& queue = _qset->satb_queue_for_thread(t); if (queue.buffer() != nullptr) {
assert(!_active || queue.index() == _qset->buffer_size(), "queues should be empty when activated");
queue.set_index(_qset->buffer_size());
}
queue.set_active(_active);
}
} closure(this, active);
Threads::threads_do(&closure);
}
// Flush the given queue's buffer into the completed-buffer machinery.
void SATBMarkQueueSet::flush_queue(SATBMarkQueue& queue) {
  // Filter now to possibly save work later.  If filtering empties the
  // buffer then flush_queue can deallocate the buffer.
  filter(queue);
  PtrQueueSet::flush_queue(queue);
}
// Called when a queue's buffer has no remaining room (index == 0).
void SATBMarkQueueSet::handle_zero_index(SATBMarkQueue& queue) {
  assert(queue.index() == 0, "precondition");
  if (queue.buffer() != nullptr) {
    // Drop filterable entries first; if enough remain, hand the buffer
    // to the completed list and give the queue a fresh one.
    filter(queue);
    if (should_enqueue_buffer(queue)) {
      enqueue_completed_buffer(exchange_buffer_with_new(queue));
    }
    // Otherwise keep using the existing (partially refilled) buffer.
  } else {
    // Queue had no buffer at all; give it one.
    install_new_buffer(queue);
  }
  assert(queue.buffer() != nullptr, "post condition");
  assert(queue.index() > 0, "post condition");
}
bool SATBMarkQueueSet::should_enqueue_buffer(SATBMarkQueue& queue) {
  // Keep the current buffer if filtered index >= threshold.
  size_t limit = buffer_enqueue_threshold();
  // Ensure we'll enqueue completely full buffers.
  assert(limit > 0, "enqueue threshold = 0");
  // Ensure we won't enqueue empty buffers.
  assert(limit <= buffer_size(),
         "enqueue threshold %zu exceeds capacity %zu",
         limit, buffer_size());
  return queue.index() < limit;
}
// SATB buffer life-cycle - Per-thread queues obtain buffers from the
// qset's buffer allocator, fill them, and push them onto the qset's
// list.  The GC concurrently pops buffers from the qset, processes
// them, and returns them to the buffer allocator for re-use.  Both
// the allocator and the qset use lock-free stacks.  The ABA problem
// is solved by having both allocation pops and GC pops performed
// within GlobalCounter critical sections, while the return of buffers
// to the allocator performs a GlobalCounter synchronize before
// pushing onto the allocator's list.

void SATBMarkQueueSet::enqueue_completed_buffer(BufferNode* node) {
  // Use nullptr for consistency with the rest of this file.
  assert(node != nullptr, "precondition");
  // Increment count and update flag appropriately.  Done before
  // pushing buffer so count is always at least the actual number in
  // the list, and decrement never underflows.
  increment_count(&_count_and_process_flag, _process_completed_buffers_threshold);
  _list.push(*node);
}
// Pop one completed buffer for processing, or return nullptr if none
// is available.
BufferNode* SATBMarkQueueSet::get_completed_buffer() {
  BufferNode* node;
  {
    // Pop inside a GlobalCounter critical section: this is the ABA
    // protection for the lock-free stack (see life-cycle comment above).
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _list.pop();
  }
  // Use nullptr for consistency with the rest of this file.
  if (node != nullptr) {
    // Got a buffer so decrement count and update flag appropriately.
    decrement_count(&_count_and_process_flag);
  }
  return node;
}
#ifndef PRODUCT // Helpful for debugging
#define SATB_PRINTER_BUFFER_SIZE 256
void SATBMarkQueueSet::print_all(constchar* msg) { char buffer[SATB_PRINTER_BUFFER_SIZE];
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
void SATBMarkQueueSet::abandon_partial_marking() {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
abandon_completed_buffers();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.