/* * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// perform initializations that must occur before any JVMTI environments // are released but which should only be initialized once (no matter // how many environments are created). void
JvmtiEnvBase::globally_initialize() {
assert(Threads::number_of_threads() == 0 || JvmtiThreadState_lock->is_locked(), "sanity check");
assert(_globally_initialized == false, "bad call");
JvmtiManageCapabilities::initialize();
// register extension functions and events
JvmtiExtensions::register_extensions();
// Add this environment to the end of the environment list (order is important)
{ // This block of code must not contain any safepoints, as list deallocation // (which occurs at a safepoint) cannot occur simultaneously with this list // addition. Note: NoSafepointVerifier cannot, currently, be used before // threads exist.
JvmtiEnvIterator it;
JvmtiEnvBase *previous_env = NULL; for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) {
previous_env = env;
} if (previous_env == NULL) {
_head_environment = this;
} else {
previous_env->set_next_environment(this);
}
}
if (_globally_initialized == false) {
globally_initialize();
}
}
jvmtiPhase
JvmtiEnvBase::phase() {
  // Environments possessing the can_generate_early_vmstart capability
  // observe JVMTI_PHASE_START in place of JVMTI_PHASE_PRIMORDIAL once an
  // early VMStart event has been recorded.
  const bool report_early_start = (_phase == JVMTI_PHASE_PRIMORDIAL) &&
                                  JvmtiExport::early_vmstart_recorded() &&
                                  early_vmstart_env();
  return report_early_start ? JVMTI_PHASE_START : _phase;  // otherwise the normal case
}
bool
JvmtiEnvBase::is_valid() {
jint value = 0;
// This object might not be a JvmtiEnvBase so we can't assume // the _magic field is properly aligned. Get the value in a safe // way and then check against JVMTI_MAGIC.
switch (sizeof(_magic)) { case 2:
value = Bytes::get_native_u2((address)&_magic); break;
case 4:
value = Bytes::get_native_u4((address)&_magic); break;
case 8:
value = Bytes::get_native_u8((address)&_magic); break;
default:
guarantee(false, "_magic field is an unexpected size");
}
return value == JVMTI_MAGIC;
}
// Returns true iff this environment requested a JVMTI 1.0.x version.
bool
JvmtiEnvBase::use_version_1_0_semantics() {
  int major, minor, micro;

  // BUG FIX: the third argument had been garbled into the mojibake token
  // "µ" (HTML entity residue of "&micro"), which does not compile.
  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 0;  // micro version doesn't matter here
}
// Returns true iff this environment requested a JVMTI 1.1.x version.
bool
JvmtiEnvBase::use_version_1_1_semantics() {
  int major, minor, micro;

  // BUG FIX: the third argument had been garbled into the mojibake token
  // "µ" (HTML entity residue of "&micro"), which does not compile.
  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 1;  // micro version doesn't matter here
}
// Returns true iff this environment requested a JVMTI 1.2.x version.
bool
JvmtiEnvBase::use_version_1_2_semantics() {
  int major, minor, micro;

  // BUG FIX: the third argument had been garbled into the mojibake token
  // "µ" (HTML entity residue of "&micro"), which does not compile.
  JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
  return major == 1 && minor == 2;  // micro version doesn't matter here
}
// Moot since ClassFileLoadHook not yet enabled. // But "true" will give a more predictable ClassFileLoadHook behavior // for environment creation during ClassFileLoadHook.
_is_retransformable = true;
// all callbacks initially NULL
memset(&_event_callbacks,0,sizeof(jvmtiEventCallbacks));
// all capabilities initially off
memset(&_current_capabilities, 0, sizeof(_current_capabilities));
// all prohibited capabilities initially off
memset(&_prohibited_capabilities, 0, sizeof(_prohibited_capabilities));
// Dispose of event info and let the event controller call us back // in a locked state (env_dispose, below)
JvmtiEventController::env_dispose(this);
}
// We have been entered with all events disabled on this environment. // A race to re-enable events (by setting callbacks) is prevented by // checking for a valid environment when setting callbacks (while // holding the JvmtiThreadState_lock).
// Same situation as with events (see above)
set_native_method_prefixes(0, NULL);
JvmtiTagMap* tag_map_to_clear = tag_map_acquire(); // A tag map can be big, clear it now to save memory until // the destructor runs. if (tag_map_to_clear != NULL) {
tag_map_to_clear->clear();
}
// There is a small window of time during which the tag map of a // disposed environment could have been reallocated. // Make sure it is gone.
JvmtiTagMap* tag_map_to_deallocate = _tag_map;
set_tag_map(NULL); // A tag map can be big, deallocate it now if (tag_map_to_deallocate != NULL) { delete tag_map_to_deallocate;
}
// JvmtiEnvBase reference is saved in JvmtiEnvThreadState. So // clean up JvmtiThreadState before deleting JvmtiEnv pointer.
JvmtiThreadState::periodic_clean_up();
// Unlink all invalid environments from the list of environments // and deallocate them
JvmtiEnvIterator it;
JvmtiEnvBase* previous_env = NULL;
JvmtiEnvBase* env = it.first(); while (env != NULL) { if (env->is_valid()) {
previous_env = env;
env = it.next(env);
} else { // This one isn't valid, remove it from the list and deallocate it
JvmtiEnvBase* defunct_env = env;
env = it.next(env); if (previous_env == NULL) {
_head_environment = env;
} else {
previous_env->set_next_environment(env);
} delete defunct_env;
}
}
if (_needs_clean_up) { // Check if we are currently iterating environment, // deallocation should not occur if we are
ThreadInsideIterationClosure tiic;
Threads::threads_do(&tiic); if (!tiic.is_inside_jvmti_env_iteration() &&
!is_inside_dying_thread_env_iteration()) {
_needs_clean_up = false;
JvmtiEnvBase::periodic_clean_up();
}
}
}
int old_prefix_count = get_native_method_prefix_count(); char **old_prefixes = get_native_method_prefixes();
// allocate and install the new prefixex if (prefix_count == 0 || !is_valid()) {
_native_method_prefix_count = 0;
_native_method_prefixes = NULL;
} else { // there are prefixes, allocate an array to hold them, and fill it char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal); if (new_prefixes == NULL) { return JVMTI_ERROR_OUT_OF_MEMORY;
} for (int i = 0; i < prefix_count; i++) { char* prefix = prefixes[i]; if (prefix == NULL) { for (int j = 0; j < (i-1); j++) {
os::free(new_prefixes[j]);
}
os::free(new_prefixes); return JVMTI_ERROR_NULL_POINTER;
}
prefix = os::strdup(prefixes[i]); if (prefix == NULL) { for (int j = 0; j < (i-1); j++) {
os::free(new_prefixes[j]);
}
os::free(new_prefixes); return JVMTI_ERROR_OUT_OF_MEMORY;
}
new_prefixes[i] = prefix;
}
_native_method_prefix_count = prefix_count;
_native_method_prefixes = new_prefixes;
}
// now that we know the new prefixes have been successfully installed we can // safely remove the old ones if (old_prefix_count != 0) { for (int i = 0; i < old_prefix_count; i++) {
os::free(old_prefixes[i]);
}
os::free(old_prefixes);
}
return JVMTI_ERROR_NONE;
}
// Collect all the prefixes which have been set in any JVM TI environments // by the SetNativeMethodPrefix(es) functions. Be sure to maintain the // order of environments and the order of prefixes within each environment. // Return in a resource allocated array. char**
JvmtiEnvBase::get_all_native_method_prefixes(int* count_ptr) {
assert(Threads::number_of_threads() == 0 ||
SafepointSynchronize::is_at_safepoint() ||
JvmtiThreadState_lock->is_locked(), "sanity check");
int total_count = 0;
GrowableArray<char*>* prefix_array =new GrowableArray<char*>(5);
JvmtiEnvIterator it; for (JvmtiEnvBase* env = it.first(); env != NULL; env = it.next(env)) { int prefix_count = env->get_native_method_prefix_count(); char** prefixes = env->get_native_method_prefixes(); for (int j = 0; j < prefix_count; j++) { // retrieve a prefix and so that it is safe against asynchronous changes // copy it into the resource area char* prefix = prefixes[j]; char* prefix_copy = NEW_RESOURCE_ARRAY(char, strlen(prefix)+1);
strcpy(prefix_copy, prefix);
prefix_array->at_put_grow(total_count++, prefix_copy);
}
}
char** all_prefixes = NEW_RESOURCE_ARRAY(char*, total_count); char** p = all_prefixes; for (int i = 0; i < total_count; ++i) {
*p++ = prefix_array->at(i);
}
*count_ptr = total_count; return all_prefixes;
}
// clear in either case to be sure we got any gap between sizes
memset(&_event_callbacks, 0, byte_cnt);
// Now that JvmtiThreadState_lock is held, prevent a possible race condition where events // are re-enabled by a call to set event callbacks where the DisposeEnvironment // occurs after the boiler-plate environment check and before the lock is acquired. if (callbacks != NULL && is_valid()) { if (size_of_callbacks < (jint)byte_cnt) {
byte_cnt = size_of_callbacks;
}
memcpy(&_event_callbacks, callbacks, byte_cnt);
}
}
// In the fullness of time, all users of the method should instead // directly use allocate, besides being cleaner and faster, this will // mean much better out of memory handling unsignedchar *
JvmtiEnvBase::jvmtiMalloc(jlong size) { unsignedchar* mem = NULL;
jvmtiError result = allocate(size, &mem);
assert(result == JVMTI_ERROR_NONE, "Allocate failed"); return mem;
}
for (int i = 0; i < length; i++) {
objArray[i] = (jthreadGroup)JNIHandles::make_local(groups->obj_at(i));
} return objArray;
}
// Return the vframe on the specified thread and depth, NULL if no such frame. // The thread and the oops in the returned vframe might not have been processed.
javaVFrame*
JvmtiEnvBase::jvf_for_thread_and_depth(JavaThread* java_thread, jint depth) { if (!java_thread->has_last_Java_frame()) { return NULL;
}
RegisterMap reg_map(java_thread,
RegisterMap::UpdateMap::include,
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::include);
javaVFrame *jvf = java_thread->last_java_vframe(®_map);
// Look up the fieldDescriptor for the given jfieldID within klass k.
// Returns false if the jfieldID is invalid or the field cannot be found.
bool
JvmtiEnvBase::get_field_descriptor(Klass* k, jfieldID field, fieldDescriptor* fd) {
  if (!jfieldIDWorkaround::is_valid_jfieldID(k, field)) {
    // BUG FIX: this statement had been garbled into the fused token
    // "returnfalse", which does not compile.
    return false;
  }
  bool found = false;
  if (jfieldIDWorkaround::is_static_jfieldID(field)) {
    JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field);
    found = id->find_local_field(fd);
  } else {
    // Non-static field. The fieldID is really the offset of the field within the object.
    int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
    found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, fd);
  }
  return found;
}
// This could be a different thread to the current one. So we need to ensure that // processing has started before we are allowed to read the continuation oop of // another thread, as it is a direct root of that other thread.
StackWatermarkSet::start_processing(java_thread, StackWatermarkKind::gc);
JavaThread* java_thread = get_JavaThread_or_null(vthread); if (java_thread != NULL) { if (!java_thread->has_last_Java_frame()) { // TBD: This is a temporary work around to avoid a guarantee caused by // the native enterSpecial frame on the top. No frames will be found // by the JVMTI functions such as GetStackTrace. return NULL;
}
vframeStream vfs(java_thread);
jvf = vfs.at_end() ? NULL : vfs.asJavaVFrame();
jvf = check_and_skip_hidden_frames(java_thread, jvf);
} else {
vframeStream vfs(cont);
jvf = vfs.at_end() ? NULL : vfs.asJavaVFrame();
jvf = check_and_skip_hidden_frames(vthread, jvf);
} return jvf;
}
// Return correct javaVFrame for a carrier (non-virtual) thread.
// It strips vthread frames at the top if there are any.
javaVFrame*
JvmtiEnvBase::get_cthread_last_java_vframe(JavaThread* jt, RegisterMap* reg_map_p) {
  // A carrier thread with a mounted continuation must be walked starting at
  // the carrier's own frames rather than the mounted continuation's frames.
  javaVFrame *result;
  if (JvmtiEnvBase::is_cthread_with_continuation(jt)) {
    result = jt->carrier_last_java_vframe(reg_map_p);
  } else {
    result = jt->last_java_vframe(reg_map_p);
  }
  // Skip hidden frames only for carrier threads
  // which are in non-temporary VTMS transition.
  if (jt->is_in_VTMS_transition()) {
    result = check_and_skip_hidden_frames(jt, result);
  }
  return result;
}
jint
JvmtiEnvBase::get_thread_state(oop thread_oop, JavaThread* jt) {
  jint state = 0;

  if (thread_oop != NULL) {
    // Most state bits come from the java.lang.Thread status field.
    state = (jint)java_lang_Thread::get_thread_status(thread_oop);
  }
  if (jt == NULL) {
    return state;  // no JavaThread*, so no VM-level bits to add
  }

  // We have a JavaThread*, so fold in the additional VM-level state bits.
  JavaThreadState jts = jt->thread_state();

  bool suspended = jt->is_carrier_thread_suspended() ||
                   ((jt->jvmti_vthread() == NULL || jt->jvmti_vthread() == thread_oop) &&
                    jt->is_suspended());
  if (suspended) {
    // Suspended non-virtual thread.
    state |= JVMTI_THREAD_STATE_SUSPENDED;
  }
  if (jts == _thread_in_native) {
    state |= JVMTI_THREAD_STATE_IN_NATIVE;
  }
  if (jt->is_interrupted(false)) {
    state |= JVMTI_THREAD_STATE_INTERRUPTED;
  }
  return state;
}
if (java_thread != NULL) { // If virtual thread is blocked on a monitor enter the BLOCKED_ON_MONITOR_ENTER bit // is set for carrier thread instead of virtual. // Other state bits except filtered ones are expected to be the same.
oop ct_oop = java_lang_VirtualThread::carrier_thread(thread_oop);
jint filtered_bits = JVMTI_THREAD_STATE_SUSPENDED | JVMTI_THREAD_STATE_INTERRUPTED;
// This call can trigger a safepoint, so thread_oop must not be used after it.
state = get_thread_state(ct_oop, java_thread) & ~filtered_bits;
} else {
jshort vt_state = java_lang_VirtualThread::state(thread_oop);
state = (jint)java_lang_VirtualThread::map_state_to_thread_status(vt_state);
} if (ext_suspended && ((state & JVMTI_THREAD_STATE_ALIVE) != 0)) {
state &= ~java_lang_VirtualThread::RUNNING;
state |= JVMTI_THREAD_STATE_ALIVE | JVMTI_THREAD_STATE_RUNNABLE | JVMTI_THREAD_STATE_SUSPENDED;
} if (interrupted) {
state |= JVMTI_THREAD_STATE_INTERRUPTED;
} return state;
}
//
// Count the number of objects for a lightweight monitor. The hobj
// parameter is object that owns the monitor so this routine will
// count the number of times the same object was locked by frames
// in java_thread.
//
jint
JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) {
  jint ret = 0;
  if (!java_thread->has_last_Java_frame()) {
    return ret;  // no Java frames so no monitors
  }

  // BUG FIX: the loop below referenced reg_map (garbled as "®_map", the
  // HTML-entity residue of "&reg_map") without any declaration in scope.
  // Restore the resource marks and the RegisterMap needed to walk the
  // thread's vframes, using the same RegisterMap configuration as the other
  // monitor-walking code in this file.
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);
  HandleMark   hm(current_thread);
  RegisterMap  reg_map(java_thread,
                       RegisterMap::UpdateMap::include,
                       RegisterMap::ProcessFrames::include,
                       RegisterMap::WalkContinuation::skip);

  for (javaVFrame *jvf = java_thread->last_java_vframe(&reg_map); jvf != NULL;
       jvf = jvf->java_sender()) {
    GrowableArray<MonitorInfo*>* mons = jvf->monitors();
    if (!mons->is_empty()) {
      for (int i = 0; i < mons->length(); i++) {
        MonitorInfo *mi = mons->at(i);
        if (mi->owner_is_scalar_replaced()) continue;

        // see if owner of the monitor is our object
        if (mi->owner() != NULL && mi->owner() == hobj()) {
          ret++;
        }
      }
    }
  }
  return ret;
}
jvmtiError
JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThread *java_thread,
jobject *monitor_ptr, bool is_virtual) {
Thread *current_thread = Thread::current();
assert(java_thread->is_handshake_safe_for(current_thread), "call by myself or at handshake"); if (!is_virtual && JvmtiEnvBase::is_cthread_with_continuation(java_thread)) { // Carrier thread with a mounted continuation case. // No contended monitor can be owned by carrier thread in this case.
*monitor_ptr = nullptr; return JVMTI_ERROR_NONE;
}
oop obj = NULL; // The ObjectMonitor* can't be async deflated since we are either // at a safepoint or the calling thread is operating on itself so // it cannot leave the underlying wait()/enter() call.
ObjectMonitor *mon = java_thread->current_waiting_monitor(); if (mon == NULL) { // thread is not doing an Object.wait() call
mon = java_thread->current_pending_monitor(); if (mon != NULL) { // The thread is trying to enter() an ObjectMonitor.
obj = mon->object();
assert(obj != NULL, "ObjectMonitor should have a valid object!");
} // implied else: no contended ObjectMonitor
} else { // thread is doing an Object.wait() call
obj = mon->object();
assert(obj != NULL, "Object.wait() should have an object");
}
jvmtiError
JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_thread,
                                 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
  // Note:
  // calling_thread is the thread that requested the list of monitors for java_thread.
  // java_thread is the thread owning the monitors.
  // current_thread is the thread executing this code, can be a non-JavaThread (e.g. VM Thread).
  // And they all may be different threads.
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread), "call by myself or at handshake");

  if (JvmtiEnvBase::is_cthread_with_continuation(java_thread)) {
    // Carrier thread with a mounted continuation case.
    // No contended monitor can be owned by carrier thread in this case.
    return JVMTI_ERROR_NONE;
  }
  if (java_thread->has_last_Java_frame()) {
    ResourceMark rm(current_thread);
    HandleMark   hm(current_thread);
    RegisterMap  reg_map(java_thread,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);

    int depth = 0;
    // BUG FIX: the argument below had been garbled into "®_map" (HTML-entity
    // residue of "&reg_map"), which does not compile.
    for (javaVFrame *jvf = get_cthread_last_java_vframe(java_thread, &reg_map);
         jvf != NULL; jvf = jvf->java_sender()) {
      if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {  // check for stack too deep
        // add locked objects for this frame into list
        err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth-1);
        if (err != JVMTI_ERROR_NONE) {
          return err;
        }
      }
    }
  }

  // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
  JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
  err = jmc.error();

  return err;
}
jvmtiError
JvmtiEnvBase::get_owned_monitors(JavaThread* calling_thread, JavaThread* java_thread, javaVFrame* jvf,
                                 GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list) {
  jvmtiError err = JVMTI_ERROR_NONE;
  Thread *current_thread = Thread::current();
  assert(java_thread->is_handshake_safe_for(current_thread), "call by myself or at handshake");

  // Walk the supplied vframe chain, collecting the locked objects of each
  // frame, subject to the MaxJavaStackTraceDepth limit (0 means unlimited).
  int depth = 0;
  while (jvf != NULL) {
    // check for stack too deep
    if (MaxJavaStackTraceDepth == 0 || depth++ < MaxJavaStackTraceDepth) {
      // Add locked objects for this frame into list.
      err = get_locked_objects_in_frame(calling_thread, java_thread, jvf, owned_monitors_list, depth - 1);
      if (err != JVMTI_ERROR_NONE) {
        return err;
      }
    }
    jvf = jvf->java_sender();
  }

  // Get off stack monitors. (e.g. acquired via jni MonitorEnter).
  JvmtiMonitorClosure jmc(calling_thread, owned_monitors_list, this);
  ObjectSynchronizer::monitors_iterate(&jmc, java_thread);
  err = jmc.error();

  return err;
}
// Save JNI local handles for any objects that this frame owns.
jvmtiError
JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread,
javaVFrame *jvf, GrowableArray<jvmtiMonitorStackDepthInfo*>* owned_monitors_list, jint stack_depth) {
jvmtiError err = JVMTI_ERROR_NONE;
Thread* current_thread = Thread::current();
ResourceMark rm(current_thread);
HandleMark hm(current_thread);
GrowableArray<MonitorInfo*>* mons = jvf->monitors(); if (mons->is_empty()) { return err; // this javaVFrame holds no monitors
}
oop wait_obj = NULL;
{ // The ObjectMonitor* can't be async deflated since we are either // at a safepoint or the calling thread is operating on itself so // it cannot leave the underlying wait() call. // Save object of current wait() call (if any) for later comparison.
ObjectMonitor *mon = java_thread->current_waiting_monitor(); if (mon != NULL) {
wait_obj = mon->object();
}
}
oop pending_obj = NULL;
{ // The ObjectMonitor* can't be async deflated since we are either // at a safepoint or the calling thread is operating on itself so // it cannot leave the underlying enter() call. // Save object of current enter() call (if any) for later comparison.
ObjectMonitor *mon = java_thread->current_pending_monitor(); if (mon != NULL) {
pending_obj = mon->object();
}
}
for (int i = 0; i < mons->length(); i++) {
MonitorInfo *mi = mons->at(i);
if (mi->owner_is_scalar_replaced()) continue;
oop obj = mi->owner(); if (obj == NULL) { // this monitor doesn't have an owning object so skip it continue;
}
if (wait_obj == obj) { // the thread is waiting on this monitor so it isn't really owned continue;
}
if (pending_obj == obj) { // the thread is pending on this monitor so it isn't really owned continue;
}
if (owned_monitors_list->length() > 0) { // Our list has at least one object on it so we have to check // for recursive object locking bool found = false; for (int j = 0; j < owned_monitors_list->length(); j++) {
jobject jobj = ((jvmtiMonitorStackDepthInfo*)owned_monitors_list->at(j))->monitor;
oop check = JNIHandles::resolve(jobj); if (check == obj) {
found = true; // we found the object break;
}
}
if (found) { // already have this object so don't include it continue;
}
}
// add the owning object to our list
jvmtiMonitorStackDepthInfo *jmsdi;
err = allocate(sizeof(jvmtiMonitorStackDepthInfo), (unsignedchar **)&jmsdi); if (err != JVMTI_ERROR_NONE) { return err;
}
Handle hobj(Thread::current(), obj);
jmsdi->monitor = jni_reference(calling_thread, hobj);
jmsdi->stack_depth = stack_depth;
owned_monitors_list->append(jmsdi);
}
if (start_depth != 0) { if (start_depth > 0) { for (int j = 0; j < start_depth && jvf != NULL; j++) {
jvf = jvf->java_sender();
} if (jvf == NULL) { // start_depth is deeper than the stack depth. return JVMTI_ERROR_ILLEGAL_ARGUMENT;
}
} else { // start_depth < 0 // We are referencing the starting depth based on the oldest // part of the stack. // Optimize to limit the number of times that java_sender() is called.
javaVFrame *jvf_cursor = jvf;
javaVFrame *jvf_prev = NULL;
javaVFrame *jvf_prev_prev = NULL; int j = 0; while (jvf_cursor != NULL) {
jvf_prev_prev = jvf_prev;
jvf_prev = jvf_cursor; for (j = 0; j > start_depth && jvf_cursor != NULL; j--) {
jvf_cursor = jvf_cursor->java_sender();
}
} if (j == start_depth) { // Previous pointer is exactly where we want to start.
jvf = jvf_prev;
} else { // We need to back up further to get to the right place. if (jvf_prev_prev == NULL) { // The -start_depth is greater than the stack depth. return JVMTI_ERROR_ILLEGAL_ARGUMENT;
} // j is now the number of frames on the stack starting with // jvf_prev, we start from jvf_prev_prev and move older on // the stack that many, and the result is -start_depth frames // remaining.
jvf = jvf_prev_prev; for (; j < 0; j++) {
jvf = jvf->java_sender();
}
}
}
} for (; count < max_count && jvf != NULL; count++) {
frame_buffer[count].method = jvf->method()->jmethod_id();
frame_buffer[count].location = (jvf->method()->is_native() ? -1 : jvf->bci());
jvf = jvf->java_sender();
}
*count_ptr = count; return JVMTI_ERROR_NONE;
}
jvmtiError
JvmtiEnvBase::get_stack_trace(JavaThread *java_thread,
jint start_depth, jint max_count,
jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
Thread *current_thread = Thread::current();
assert(SafepointSynchronize::is_at_safepoint() ||
java_thread->is_handshake_safe_for(current_thread), "call by myself / at safepoint / at handshake"); int count = 0;
jvmtiError err = JVMTI_ERROR_NONE;
if (thread == NULL) {
java_thread = cur_thread;
thread_oop = get_vthread_or_thread_oop(java_thread); if (thread_oop == NULL || !thread_oop->is_a(vmClasses::Thread_klass())) { return JVMTI_ERROR_INVALID_THREAD;
}
} else {
jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(t_list, thread, &java_thread, &thread_oop); if (err != JVMTI_ERROR_NONE) { // We got an error code so we don't have a JavaThread*, but only return // an error from here if we didn't get a valid thread_oop. In a vthread case // the cv_external_thread_to_JavaThread is expected to correctly set the // thread_oop and return JVMTI_ERROR_INVALID_THREAD which we ignore here. if (thread_oop == NULL || err != JVMTI_ERROR_INVALID_THREAD) {
*thread_oop_p = thread_oop; return err;
}
} if (java_thread == NULL && java_lang_VirtualThread::is_instance(thread_oop)) {
java_thread = get_JavaThread_or_null(thread_oop);
}
}
*jt_pp = java_thread;
*thread_oop_p = thread_oop; if (java_lang_VirtualThread::is_instance(thread_oop) &&
!JvmtiEnvBase::is_vthread_alive(thread_oop)) { return JVMTI_ERROR_THREAD_NOT_ALIVE;
} return JVMTI_ERROR_NONE;
}
uint32_t debug_bits = 0; // first derive the object's owner and entry_count (if any)
owning_thread = ObjectSynchronizer::get_lock_owner(tlh.list(), hobj); if (owning_thread != NULL) {
Handle th(current_thread, get_vthread_or_thread_oop(owning_thread));
ret.owner = (jthread)jni_reference(calling_thread, th);
// The recursions field of a monitor does not reflect recursions // as lightweight locks before inflating the monitor are not included. // We have to count the number of recursive monitor entries the hard way. // We pass a handle to survive any GCs along the way.
ret.entry_count = count_locked_objects(owning_thread, hobj);
} // implied else: entry_count == 0
jint nWant = 0, nWait = 0;
markWord mark = hobj->mark(); if (mark.has_monitor()) {
mon = mark.monitor();
assert(mon != NULL, "must have monitor"); // this object has a heavyweight monitor
nWant = mon->contentions(); // # of threads contending for monitor
nWait = mon->waiters(); // # of threads in Object.wait()
ret.waiter_count = nWant + nWait;
ret.notify_waiter_count = nWait;
} else { // this object has a lightweight monitor
ret.waiter_count = 0;
ret.notify_waiter_count = 0;
}
// now derive the rest of the fields if (mon != NULL) { // this object has a heavyweight monitor
// Number of waiters may actually be less than the waiter count. // So NULL out memory so that unused memory will be NULL.
memset(ret.waiters, 0, ret.waiter_count * sizeof(jthread *));
memset(ret.notify_waiters, 0, ret.notify_waiter_count * sizeof(jthread *));
if (ret.waiter_count > 0) { // we have contending and/or waiting threads if (nWant > 0) { // we have contending threads
ResourceMark rm(current_thread); // get_pending_threads returns only java thread so we do not need to // check for non java threads.
GrowableArray<JavaThread*>* wantList = Threads::get_pending_threads(tlh.list(), nWant, (address)mon); if (wantList->length() < nWant) { // robustness: the pending list has gotten smaller
nWant = wantList->length();
} for (int i = 0; i < nWant; i++) {
JavaThread *pending_thread = wantList->at(i);
Handle th(current_thread, get_vthread_or_thread_oop(pending_thread));
ret.waiters[i] = (jthread)jni_reference(calling_thread, th);
}
} if (nWait > 0) { // we have threads in Object.wait() int offset = nWant; // add after any contending threads
ObjectWaiter *waiter = mon->first_waiter(); for (int i = 0, j = 0; i < nWait; i++) { if (waiter == NULL) { // robustness: the waiting list has gotten smaller
nWait = j; break;
}
JavaThread *w = mon->thread_of_waiter(waiter); if (w != NULL) { // If the thread was found on the ObjectWaiter list, then // it has not been notified. This thread can't change the // state of the monitor so it doesn't need to be suspended.
Handle th(current_thread, get_vthread_or_thread_oop(w));
ret.waiters[offset + j] = (jthread)jni_reference(calling_thread, th);
ret.notify_waiters[j++] = (jthread)jni_reference(calling_thread, th);
}
waiter = mon->next_waiter(waiter);
}
}
} // ThreadsListHandle is destroyed here.
// Adjust count. nWant and nWait count values may be less than original.
ret.waiter_count = nWant + nWait;
ret.notify_waiter_count = nWait;
} else { // this object has a lightweight monitor and we have nothing more // to do here because the defaults are just fine.
}
// we don't update return parameter unless everything worked
*info_ptr = ret;
if (is_virtual) { if (single_suspend) { if (JvmtiVTSuspender::is_vthread_suspended(thread_h())) { return JVMTI_ERROR_THREAD_SUSPENDED;
}
JvmtiVTSuspender::register_vthread_suspend(thread_h()); // Check if virtual thread is mounted and there is a java_thread. // A non-NULL java_thread is always passed in the !single_suspend case.
oop carrier_thread = java_lang_VirtualThread::carrier_thread(thread_h());
java_thread = carrier_thread == NULL ? NULL : java_lang_Thread::thread(carrier_thread);
} // The java_thread can be still blocked in VTMS transition after a previous JVMTI resume call. // There is no need to suspend the java_thread in this case. After vthread unblocking, // it will check for ext_suspend request and suspend itself if necessary. if (java_thread == NULL || java_thread->is_suspended()) { // We are done if the virtual thread is unmounted or // the java_thread is externally suspended. return JVMTI_ERROR_NONE;
} // The virtual thread is mounted: suspend the java_thread.
} // Don't allow hidden thread suspend request. if (java_thread->is_hidden_from_external_view()) { return JVMTI_ERROR_NONE;
} bool is_passive_cthread = is_passive_carrier_thread(java_thread, thread_h());
// A case of non-virtual thread. if (!is_virtual) { // Thread.suspend() is used in some tests. It sets jt->is_suspended() only. if (java_thread->is_carrier_thread_suspended() ||
(!is_passive_cthread && java_thread->is_suspended())) { return JVMTI_ERROR_THREAD_SUSPENDED;
}
java_thread->set_carrier_thread_suspended();
}
assert(!java_thread->is_in_VTMS_transition(), "sanity check");
// An attempt to handshake-suspend a passive carrier thread will result in // suspension of mounted virtual thread. So, we just mark it as suspended // and it will be actually suspended at virtual thread unmount transition. if (!is_passive_cthread) {
assert(single_suspend || is_virtual, "SuspendAllVirtualThreads should never suspend non-virtual threads"); // Case of mounted virtual or attached carrier thread. if (!JvmtiSuspendControl::suspend(java_thread)) { // Thread is already suspended or in process of exiting. if (java_thread->is_exiting()) { // The thread was in the process of exiting. return JVMTI_ERROR_THREAD_NOT_ALIVE;
} return JVMTI_ERROR_THREAD_SUSPENDED;
}
} return JVMTI_ERROR_NONE;
}
if (is_virtual) { if (single_resume) { if (!JvmtiVTSuspender::is_vthread_suspended(thread_h())) { return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
JvmtiVTSuspender::register_vthread_resume(thread_h()); // Check if virtual thread is mounted and there is a java_thread. // A non-NULL java_thread is always passed in the !single_resume case.
oop carrier_thread = java_lang_VirtualThread::carrier_thread(thread_h());
java_thread = carrier_thread == NULL ? NULL : java_lang_Thread::thread(carrier_thread);
} // The java_thread can be still blocked in VTMS transition after a previous JVMTI suspend call. // There is no need to resume the java_thread in this case. After vthread unblocking, // it will check for is_vthread_suspended request and remain resumed if necessary. if (java_thread == NULL || !java_thread->is_suspended()) { // We are done if the virtual thread is unmounted or // the java_thread is not externally suspended. return JVMTI_ERROR_NONE;
} // The virtual thread is mounted and java_thread is supended: resume the java_thread.
} // Don't allow hidden thread resume request. if (java_thread->is_hidden_from_external_view()) { return JVMTI_ERROR_NONE;
} bool is_passive_cthread = is_passive_carrier_thread(java_thread, thread_h());
// A case of a non-virtual thread. if (!is_virtual) { if (!java_thread->is_carrier_thread_suspended() &&
(is_passive_cthread || !java_thread->is_suspended())) { return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
java_thread->clear_carrier_thread_suspended();
}
assert(!java_thread->is_in_VTMS_transition(), "sanity check");
if (!is_passive_cthread) {
assert(single_resume || is_virtual, "ResumeAllVirtualThreads should never resume non-virtual threads"); if (java_thread->is_suspended()) { if (!JvmtiSuspendControl::resume(java_thread)) { return JVMTI_ERROR_THREAD_NOT_SUSPENDED;
}
}
} return JVMTI_ERROR_NONE;
}
// Create a jvmtiStackInfo inside a linked list node and create a
// buffer for the frame information, both allocated as resource objects.
// Fill in both the jvmtiStackInfo and the jvmtiFrameInfo.
// Note that either or both of thr and thread_oop
// may be null if the thread is new or has exited.
void
MultipleStackTracesCollector::fill_frames(jthread jt, JavaThread *thr, oop thread_oop) {
#ifdef ASSERT
  // Caller must be at a safepoint, the thread itself, or handshake-safe with
  // the target; 'thr' may be NULL for an unmounted virtual thread.
  Thread *current_thread = Thread::current();
  assert(SafepointSynchronize::is_at_safepoint() ||
         thr == NULL ||
         thr->is_handshake_safe_for(current_thread),
         "unmounted virtual thread / call by myself / at safepoint / at handshake");
#endif
// NOTE(review): the remainder of fill_frames (and its closing brace) is missing
// from this chunk -- restore it from the original file; as written this
// definition is truncated and will not compile.
// Based on the stack information in the linked list, allocate memory // block to return and fill it from the info in the linked list. void
MultipleStackTracesCollector::allocate_and_fill_stacks(jint thread_count) { // do I need to worry about alignment issues?
jlong alloc_size = thread_count * sizeof(jvmtiStackInfo)
+ _frame_count_total * sizeof(jvmtiFrameInfo);
env()->allocate(alloc_size, (unsignedchar **)&_stack_info);
// pointers to move through the newly allocated space as it is filled in
jvmtiStackInfo *si = _stack_info + thread_count; // bottom of stack info
jvmtiFrameInfo *fi = (jvmtiFrameInfo *)si; // is the top of frame info
// copy information in resource area into allocated buffer // insert stack info backwards since linked list is backwards // insert frame info forwards // walk the StackInfoNodes for (struct StackInfoNode *sin = head(); sin != NULL; sin = sin->next) {
jint frame_count = sin->info.frame_count;
size_t frames_size = frame_count * sizeof(jvmtiFrameInfo);
--si;
memcpy(si, &(sin->info), sizeof(jvmtiStackInfo)); if (frames_size == 0) {
si->frame_buffer = NULL;
} else {
memcpy(fi, sin->info.frame_buffer, frames_size);
si->frame_buffer = fi; // point to the new allocated copy of the frames
fi += frame_count;
}
}
assert(si == _stack_info, "the last copied stack info must be the first record");
assert((unsignedchar *)fi == ((unsignedchar *)_stack_info) + alloc_size, "the last copied frame info must be the last record");
}
void
VM_GetThreadListStackTraces::doit() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
ResourceMark rm;
ThreadsListHandle tlh; for (int i = 0; i < _thread_count; ++i) {
jthread jt = _thread_list[i];
JavaThread* java_thread = NULL;
oop thread_oop = NULL;
jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), jt, &java_thread, &thread_oop); if (err != JVMTI_ERROR_NONE) { // We got an error code so we don't have a JavaThread *, but // only return an error from here if we didn't get a valid // thread_oop. // In the virtual thread case the cv_external_thread_to_JavaThread is expected to correctly set // the thread_oop and return JVMTI_ERROR_INVALID_THREAD which we ignore here. if (thread_oop == NULL) {
_collector.set_result(err); return;
} // We have a valid thread_oop.
}
_collector.fill_frames(jt, java_thread, thread_oop);
}
_collector.allocate_and_fill_stacks(_thread_count);
}
// Safepoint operation: collect stack traces for every visible, live
// JavaThread and hand them to the collector.
void
VM_GetAllStackTraces::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ResourceMark rm;
  _final_thread_count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    oop thread_oop = jt->threadObj();
    // Skip threads with no java.lang.Thread object, exiting threads,
    // dead threads, and threads hidden from the external view.
    if (thread_oop == NULL ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(thread_oop) ||
        jt->is_hidden_from_external_view()) {
      continue;
    }
    ++_final_thread_count;
    // Handle block of the calling thread is used to create local refs.
    _collector.fill_frames((jthread)JNIHandles::make_local(_calling_thread, thread_oop),
                           jt, thread_oop);
  }
  _collector.allocate_and_fill_stacks(_final_thread_count);
}
// Verifies that the top frame is a java frame in an expected state.
// Deoptimizes frame if needed.
// Checks that the frame method signature matches the return type (tos).
// HandleMark must be defined in the caller only.
// It is to keep a ret_ob_h handle alive after return to the caller.
jvmtiError
JvmtiEnvBase::check_top_frame(Thread* current_thread, JavaThread* java_thread,
                              jvalue value, TosState tos, Handle* ret_ob_h) {
  ResourceMark rm(current_thread);
  // NOTE(review): 'jvf' is used below but never declared in this chunk -- the
  // statements that obtain the top javaVFrame of java_thread appear to have
  // been lost during extraction; restore them from the original file before
  // this will compile.
  if (jvf->method()->is_native()) {
    return JVMTI_ERROR_OPAQUE_FRAME;
  }
  // If the frame is a compiled one, need to deoptimize it.
  if (jvf->is_compiled_frame()) {
    if (!jvf->fr().can_be_deoptimized()) {
      return JVMTI_ERROR_OPAQUE_FRAME;
    }
    Deoptimization::deoptimize_frame(java_thread, jvf->fr().id());
  }
  // Get information about method return type
  Symbol* signature = jvf->method()->signature();
  // NOTE(review): 'ty_sign', 'ob_k' and 'ob_h' are also undeclared here -- the
  // tos/return-type checking code that derives them (and the opening of the
  // block closed by the stray '}' below) is missing from this chunk.
  if (!VM_GetOrSetLocal::is_assignable(ty_sign, ob_k, current_thread)) {
    return JVMTI_ERROR_TYPE_MISMATCH;
  }
  *ret_ob_h = ob_h;
  }  // NOTE(review): closes a block whose opening is not visible here.
  return JVMTI_ERROR_NONE;
} /* end check_top_frame */
// ForceEarlyReturn<type> follows the PopFrame approach in many aspects. // Main difference is on the last stage in the interpreter. // The PopFrame stops method execution to continue execution // from the same method call instruction. // The ForceEarlyReturn forces return from method so the execution // continues at the bytecode following the method call.
// thread - NOT protected by ThreadsListHandle and NOT pre-checked
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.