/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// TODO: we need to define a naming convention for perf counters // to distinguish counters for: // - standard JSR174 use // - Hotspot extension (public and committed) // - Hotspot extension (private/internal and uncommitted)
// These counters are for java.lang.management API support. // They are created even if -XX:-UsePerfData is set and in // that case, they will be allocated on C heap.
// Reset the recorded peak thread count back to the current number of
// live threads. Threads_lock is taken so the update cannot race with
// concurrent thread addition or removal.
void ThreadService::reset_peak_thread_count() {
  MutexLocker ml(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}
staticbool is_hidden_thread(JavaThread *thread) { // hide VM internal or JVMTI agent threads return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}
// Do not count hidden threads if (is_hidden_thread(thread)) { return;
}
assert(!thread->is_terminated(), "must not be terminated"); if (!thread->is_exiting()) { // We did not get here via JavaThread::exit() so current_thread_exiting() // was not called, e.g., JavaThread::cleanup_failed_attach_current_thread().
decrement_thread_counts(thread, daemon);
}
int daemon_count = _atomic_daemon_threads_count; int count = _atomic_threads_count;
// Counts are incremented at the same time, but atomic counts are // decremented earlier than perf counts.
assert(_live_threads_count->get_value() > count, "thread count mismatch %d : %d",
(int)_live_threads_count->get_value(), count);
// Counts are incremented at the same time, but atomic counts are // decremented earlier than perf counts.
assert(_daemon_threads_count->get_value() >= daemon_count, "thread count mismatch %d : %d",
(int)_daemon_threads_count->get_value(), daemon_count);
assert(_live_threads_count->get_value() >= count, "thread count mismatch %d : %d",
(int)_live_threads_count->get_value(), count);
assert(_live_threads_count->get_value() > 0 ||
(_live_threads_count->get_value() == 0 && count == 0 &&
_daemon_threads_count->get_value() == 0 && daemon_count == 0), "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
(int)_live_threads_count->get_value(), count,
(int)_daemon_threads_count->get_value(), daemon_count);
assert(_daemon_threads_count->get_value() > 0 ||
(_daemon_threads_count->get_value() == 0 && daemon_count == 0), "thread counts should reach 0 at the same time, daemon %d,%d",
(int)_daemon_threads_count->get_value(), daemon_count);
}
// Called by the current thread on its way out (via JavaThread::exit())
// to undo its contribution to the live/daemon thread counts.
void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Hidden threads were never counted, so there is nothing to decrement.
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}
// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
assert(thread != NULL, "should be non-NULL");
debug_only(Thread::check_for_dangling_thread_pointer(thread);)
// This function can be called on a target JavaThread that is not // the caller and we are not at a safepoint. So it is possible for // the waiting or pending condition to be over/stale and for the // first stage of async deflation to clear the object field in // the ObjectMonitor. It is also possible for the object to be // inflated again and to be associated with a completely different // ObjectMonitor by the time this object reference is processed // by the caller.
ObjectMonitor *wait_obj = thread->current_waiting_monitor();
oop obj = NULL; if (wait_obj != NULL) { // thread is doing an Object.wait() call
obj = wait_obj->object();
} else {
ObjectMonitor *enter_obj = thread->current_pending_monitor(); if (enter_obj != NULL) { // thread is trying to enter() an ObjectMonitor.
obj = enter_obj->object();
}
}
ThreadDumpResult* prev = NULL; bool found = false; for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) { if (d == dump) { if (prev == NULL) {
_threaddump_list = dump->next();
} else {
prev->set_next(dump->next());
}
found = true; break;
}
}
assert(found, "The threaddump result to be removed must exist.");
}
// Dump stack trace of threads specified in the given threads array. // Returns StackTraceElement[][] each element is the stack trace of a thread in // the corresponding entry in the given threads array
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  // Fix: 'dump_result' was used below but never declared — the
  // ThreadDumpResult and the VM operation that fills it had been lost.
  // Collect the snapshots in the VM thread; dump_result owns them for
  // the duration of this call.
  ThreadDumpResult dump_result(num_threads);
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object
  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(),
         "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement object
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}
// Clear the contention-count statistics for the given thread, if it has
// a ThreadStatistics object allocated; a no-op otherwise.
void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* ts = thread->get_thread_stat();
  if (ts != NULL) {
    ts->reset_count_stat();
  }
}
// Clear the contention-time statistics for the given thread, if it has
// a ThreadStatistics object allocated; a no-op otherwise.
void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* ts = thread->get_thread_stat();
  if (ts != NULL) {
    ts->reset_time_stat();
  }
}
// Returns true if 'jt' either backs a mounted virtual thread or is
// currently acting as a carrier thread.
// Fix: the original text had "returntrue;" / "returnfalse;" fused into
// single tokens, which does not compile.
bool ThreadService::is_virtual_or_carrier_thread(JavaThread* jt) {
  oop threadObj = jt->threadObj();
  if (threadObj != NULL && threadObj->is_a(vmClasses::BasicVirtualThread_klass())) {
    // a virtual thread backed by JavaThread
    return true;
  }
  if (jt->is_vthread_mounted()) {
    // carrier thread
    return true;
  }
  return false;
}
// Find deadlocks involving raw monitors, object monitors and concurrent locks // if concurrent_locks is true. // We skip virtual thread carriers under the assumption that the current scheduler, ForkJoinPool, // doesn't hold any locks while mounting a virtual thread, so any owned monitor (or j.u.c., lock for that matter) // on that JavaThread must be owned by the virtual thread, and we don't support deadlock detection for virtual threads.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, boolconcurrent_locks) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
// This code was modified from the original Threads::find_deadlocks code. int globalDfn = 0, thisDfn;
ObjectMonitor* waitingToLockMonitor = NULL;
JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
oop waitingToLockBlocker = NULL; bool blocked_on_monitor = false;
JavaThread *currentThread, *previousThread; int num_deadlocks = 0;
// Initialize the depth-first-number for each JavaThread.
JavaThreadIterator jti(t_list); for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) { if (!is_virtual_or_carrier_thread(jt)) {
jt->set_depth_first_number(-1);
}
}
DeadlockCycle* deadlocks = NULL;
DeadlockCycle* last = NULL;
DeadlockCycle* cycle = new DeadlockCycle(); for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) { if (is_virtual_or_carrier_thread(jt)) { // skip virtual and carrier threads continue;
} if (jt->depth_first_number() >= 0) { // this thread was already visited continue;
}
// The ObjectMonitor* can't be async deflated since we are at a safepoint. // When there is a deadlock, all the monitors involved in the dependency // cycle must be contended and heavyweight. So we only care about the // heavyweight monitor a thread is waiting to lock.
waitingToLockMonitor = jt->current_pending_monitor(); // JVM TI raw monitors can also be involved in deadlocks, and we can be // waiting to lock both a raw monitor and ObjectMonitor at the same time. // It isn't clear how to make deadlock detection work correctly if that // happens.
waitingToLockRawMonitor = jt->current_pending_raw_monitor();
if (concurrent_locks) {
waitingToLockBlocker = jt->current_park_blocker();
}
while (waitingToLockMonitor != NULL ||
waitingToLockRawMonitor != NULL ||
waitingToLockBlocker != NULL) {
cycle->add_thread(currentThread); // Give preference to the raw monitor if (waitingToLockRawMonitor != NULL) {
Thread* owner = waitingToLockRawMonitor->owner(); if (owner != NULL && // the raw monitor could be released at any time
owner->is_Java_thread()) {
currentThread = JavaThread::cast(owner);
}
} elseif (waitingToLockMonitor != NULL) { if (waitingToLockMonitor->has_owner()) {
currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor); if (currentThread == NULL) { // This function is called at a safepoint so the JavaThread // that owns waitingToLockMonitor should be findable, but // if it is not findable, then the previous currentThread is // blocked permanently. We record this as a deadlock.
num_deadlocks++;
// add this cycle to the deadlocks list if (deadlocks == NULL) {
deadlocks = cycle;
} else {
last->set_next(cycle);
}
last = cycle;
cycle = new DeadlockCycle(); break;
}
}
} else { if (concurrent_locks) { if (waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); // This JavaThread (if there is one) is protected by the // ThreadsListSetter in VM_FindDeadlocks::doit().
currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
} else {
currentThread = NULL;
}
}
}
if (currentThread == NULL || is_virtual_or_carrier_thread(currentThread)) { // No dependency on another thread break;
} if (currentThread->depth_first_number() < 0) { // First visit to this thread
currentThread->set_depth_first_number(globalDfn++);
} elseif (currentThread->depth_first_number() < thisDfn) { // Thread already visited, and not on a (new) cycle break;
} elseif (currentThread == previousThread) { // Self-loop, ignore break;
} else { // We have a (new) cycle
num_deadlocks++;
// add this cycle to the deadlocks list if (deadlocks == NULL) {
deadlocks = cycle;
} else {
last->set_next(cycle);
}
last = cycle;
cycle = new DeadlockCycle(); break;
}
previousThread = currentThread;
waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor(); if (concurrent_locks) {
waitingToLockBlocker = currentThread->current_park_blocker();
}
}
// Create a new ThreadDumpResult object and append to the list. // If GC happens before this function returns, Method* // in the stack trace will be visited.
ThreadService::add_thread_dump(this);
}
ThreadDumpResult::ThreadDumpResult(int num_threads) :
    _num_threads(num_threads),
    _num_snapshots(0),
    _snapshots(NULL),
    _last(NULL),
    _next(NULL),
    _setter() {
  // Register this result on the global thread-dump list so that, should
  // a GC happen before this function returns, the oops it references
  // are visited.
  ThreadService::add_thread_dump(this);
}
// free all the ThreadSnapshot objects created during // the VM_ThreadDump operation
ThreadSnapshot* ts = _snapshots; while (ts != NULL) {
ThreadSnapshot* p = ts;
ts = ts->next(); delete p;
}
}
ThreadStackTrace::~ThreadStackTrace() {
  // Free the per-frame information collected for this trace.
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;

  // Release the OopHandles recorded for JNI-locked monitors, if any.
  if (_jni_locked_monitors != NULL) {
    for (int n = 0; n < _jni_locked_monitors->length(); n++) {
      _jni_locked_monitors->at(n).release(_thread_service_storage);
    }
    delete _jni_locked_monitors;
  }
}
void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth, ObjectMonitorsHashtable* table, bool full) {
assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
if (_thread->has_last_Java_frame()) {
RegisterMap reg_map(_thread,
RegisterMap::UpdateMap::include,
RegisterMap::ProcessFrames::include,
RegisterMap::WalkContinuation::skip);
// If full, we want to print both vthread and carrier frames
vframe* start_vf = !full && _thread->is_vthread_mounted()
? _thread->carrier_last_java_vframe(®_map)
: _thread->last_java_vframe(®_map); int count = 0; for (vframe* f = start_vf; f; f = f->sender() ) { if (maxDepth >= 0 && count == maxDepth) { // Skip frames if more than maxDepth break;
} if (!full && f->is_vthread_entry()) { break;
} if (f->is_java_frame()) {
javaVFrame* jvf = javaVFrame::cast(f);
add_stack_frame(jvf);
count++;
} else { // Ignore non-Java frames
}
}
}
if (_with_locked_monitors) { // Iterate inflated monitors and find monitors locked by this thread // that are not found in the stack, e.g. JNI locked monitors:
InflatedMonitorsClosure imc(this); if (table != nullptr) { // Get the ObjectMonitors locked by the target thread, if any, // and does not include any where owner is set to a stack lock // address in the target thread:
ObjectMonitorsHashtable::PtrList* list = table->get_entry(_thread); if (list != nullptr) {
ObjectSynchronizer::monitors_iterate(&imc, list, _thread);
}
} else {
ObjectSynchronizer::monitors_iterate(&imc, _thread);
}
}
}
// Returns true if 'object' is one of the monitors locked by a frame of
// this stack trace.
// Improvement: the original set a 'found' flag and used 'break', which
// only exits the inner loop — the outer frame scan kept running after a
// hit. Return directly instead.
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<OopHandle>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j).resolve();
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        // Found it — no need to scan the remaining frames.
        return true;
      }
    }
  }
  return false;
}
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
InstanceKlass* ik = vmClasses::StackTraceElement_klass();
assert(ik != NULL, "must be loaded in 1.4+");
// Apply 'f' to every Metadata reference held by the recorded frames.
void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  for (int i = 0; i < _frames->length(); i++) {
    _frames->at(i)->metadata_do(f);
  }
}
ConcurrentLocksDump::~ConcurrentLocksDump() {
  // The caller may have taken ownership of the map (_retain_map_on_free);
  // in that case the list must survive this destructor.
  if (_retain_map_on_free) {
    return;
  }
  ThreadConcurrentLocks* t = _map;
  while (t != NULL) {
    ThreadConcurrentLocks* doomed = t;
    t = t->next();
    delete doomed;
  }
}
// Dump all locked concurrent locks: find every AbstractOwnableSynchronizer
// instance in the heap and index it by its owning thread.
void ConcurrentLocksDump::dump_at_safepoint() {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects =
      new (mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(
      vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
      aos_objects);

  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}
// build a map of JavaThread to all its owned AbstractOwnableSynchronizer void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) { int length = aos_objects->length(); for (int i = 0; i < length; i++) {
oop o = aos_objects->at(i);
oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o); if (owner_thread_obj != NULL) { // See comments in ThreadConcurrentLocks to see how this // JavaThread* is protected.
JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
assert(o->is_instance(), "Must be an instanceOop");
add_lock(thread, (instanceOop) o);
}
}
}
// First owned lock found for this thread
tcl = new ThreadConcurrentLocks(thread);
tcl->add_lock(o); if (_map == NULL) {
_map = tcl;
} else {
_last->set_next(tcl);
}
_last = tcl;
}
// If thread is still attaching then threadObj will be NULL.
_thread_status = threadObj == NULL ? JavaThreadStatus::NEW
: java_lang_Thread::get_thread_status(threadObj);
if (obj() == NULL) { // monitor no longer exists; thread is not blocked
_thread_status = JavaThreadStatus::RUNNABLE;
} else {
blocker_object = obj();
JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj); if ((owner == NULL && _thread_status == JavaThreadStatus::BLOCKED_ON_MONITOR_ENTER)
|| (owner != NULL && owner->is_attaching_via_jni())) { // ownership information of the monitor is not available // (may no longer be owned or releasing to some other thread) // make this thread in RUNNABLE state. // And when the owner thread is in attaching state, the java thread // is not completely initialized. For example thread name and id // and may not be set, so hide the attaching thread.
_thread_status = JavaThreadStatus::RUNNABLE;
blocker_object = NULL;
} elseif (owner != NULL) {
blocker_object_owner = owner->threadObj();
}
}
} elseif (_thread_status == JavaThreadStatus::PARKED || _thread_status == JavaThreadStatus::PARKED_TIMED) {
blocker_object = thread->current_park_blocker(); if (blocker_object != NULL && blocker_object->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(blocker_object);
}
}
JavaThread* currentThread;
JvmtiRawMonitor* waitingToLockRawMonitor;
oop waitingToLockBlocker; int len = _threads->length(); for (int i = 0; i < len; i++) {
currentThread = _threads->at(i); // The ObjectMonitor* can't be async deflated since we are at a safepoint.
ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
waitingToLockBlocker = currentThread->current_park_blocker();
st->cr();
st->print_cr("\"%s\":", currentThread->name()); constchar* owner_desc = ",\n which is held by";
// Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor // sets the current pending monitor, it is possible to then see a pending raw monitor as well. if (waitingToLockRawMonitor != NULL) {
st->print(" waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
Thread* owner = waitingToLockRawMonitor->owner(); // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread if (owner != NULL) { if (owner->is_Java_thread()) {
currentThread = JavaThread::cast(owner);
st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
} else {
st->print_cr(",\n which has now been released");
}
} else {
st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
}
}
if (waitingToLockMonitor != NULL) {
st->print(" waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
oop obj = waitingToLockMonitor->object();
st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
obj->klass()->external_name());
if (!currentThread->current_pending_monitor_is_from_java()) {
owner_desc = "\n in JNI, which is held by";
}
currentThread = Threads::owning_thread_from_monitor(t_list, waitingToLockMonitor); if (currentThread == NULL) { // The deadlock was detected at a safepoint so the JavaThread // that owns waitingToLockMonitor should be findable, but // if it is not findable, then the previous currentThread is // blocked permanently.
st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
p2i(waitingToLockMonitor->owner())); continue;
}
} else {
st->print(" waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
p2i(waitingToLockBlocker),
waitingToLockBlocker->klass()->external_name());
assert(waitingToLockBlocker->is_a(vmClasses::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()), "Must be an AbstractOwnableSynchronizer");
oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
currentThread = java_lang_Thread::thread(ownerObj);
assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
}
st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
}
int init_size = ThreadService::get_live_thread_count();
_threads_array = new GrowableArray<instanceHandle>(init_size);
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { // skips JavaThreads in the process of exiting // and also skips VM internal JavaThreads // Threads in _thread_new or _thread_new_trans state are included. // i.e. threads have been started but not yet running. if (jt->threadObj() == NULL ||
jt->is_exiting() ||
!java_lang_Thread::is_alive(jt->threadObj()) ||
jt->is_hidden_from_external_view()) { continue;
}
/*
 * NOTE(review): the text below is residue from the web page this source was
 * scraped from — it is not part of threadService.cpp and must not remain as
 * bare text in a C++ file (it would not compile). Wrapped in a comment;
 * remove entirely when convenient. Translation of the German disclaimer:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor quality
 * of the provided information is guaranteed.
 * Note: the colored syntax highlighting is still experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */