/* * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
// Manages a scope for a JVMCI runtime call that attempts a heap allocation. // If there is a pending nonasync exception upon closing the scope and the runtime // call is of the variety where allocation failure returns NULL without an // exception, the following action is taken: // 1. The pending nonasync exception is cleared // 2. NULL is written to JavaThread::_vm_result // 3. Checks that an OutOfMemoryError is Universe::out_of_memory_error_retry(). class RetryableAllocationMark: public StackObj { private:
JavaThread* _thread; public:
RetryableAllocationMark(JavaThread* thread, bool activate) { if (activate) {
assert(!thread->in_retryable_allocation(), "retryable allocation scope is non-reentrant");
_thread = thread;
_thread->set_in_retryable_allocation(true);
} else {
_thread = NULL;
}
}
~RetryableAllocationMark() { if (_thread != NULL) {
_thread->set_in_retryable_allocation(false);
JavaThread* THREAD = _thread; // For exception macros. if (HAS_PENDING_EXCEPTION) {
oop ex = PENDING_EXCEPTION; // Do not clear probable async exceptions.
CLEAR_PENDING_NONASYNC_EXCEPTION;
oop retry_oome = Universe::out_of_memory_error_retry(); if (ex->is_a(retry_oome->klass()) && retry_oome != ex) {
ResourceMark rm;
fatal("Unexpected exception in scope of retryable allocation: " INTPTR_FORMAT " of type %s", p2i(ex), ex->klass()->external_name());
}
_thread->set_vm_result(NULL);
}
}
}
};
// Slow-path instance allocation entry point for JVMCI-compiled code.
// The new instance is returned via JavaThread::_vm_result. With
// null_on_fail, allocation failure yields NULL instead of an OOME
// (see RetryableAllocationMark).
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_instance_common(JavaThread* current, Klass* klass, bool null_on_fail))
  JRT_BLOCK;
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  {
    RetryableAllocationMark ram(current, null_on_fail);
    h->check_valid_for_instantiation(true, CHECK);
    oop obj;
    if (null_on_fail) {
      if (!h->is_initialized()) {
        // Cannot re-execute class initialization without side effects
        // so return without attempting the initialization
        return;
      }
    } else {
      // make sure klass is initialized
      h->initialize(CHECK);
    }
    // allocate instance and return via TLS
    obj = h->allocate_instance(CHECK);
    current->set_vm_result(obj);
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
// Slow-path one-dimensional array allocation for JVMCI-compiled code.
// Handles both primitive (typeArray) and object (objArray) element types;
// the result is returned via JavaThread::_vm_result.
JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array_common(JavaThread* current, Klass* array_klass, jint length, bool null_on_fail))
  JRT_BLOCK;
  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  oop obj;
  if (array_klass->is_typeArray_klass()) {
    BasicType elt_type = TypeArrayKlass::cast(array_klass)->element_type();
    RetryableAllocationMark ram(current, null_on_fail);
    obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  } else {
    Handle holder(current, array_klass->klass_holder()); // keep the klass alive
    Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
    RetryableAllocationMark ram(current, null_on_fail);
    obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  }
  current->set_vm_result(obj);
  // This is pretty rare but this runtime patch is stressful to deoptimization
  // if we deoptimize here so force a deopt to stress the path.
  if (DeoptimizeALot) {
    static int deopts = 0;
    // Alternate between deoptimizing and raising an error (which will also cause a deopt)
    if (deopts++ % 2 == 0) {
      if (null_on_fail) {
        return;
      } else {
        ResourceMark rm(current);
        THROW(vmSymbols::java_lang_OutOfMemoryError());
      }
    } else {
      deopt_caller();
    }
  }
  JRT_BLOCK_END;
  SharedRuntime::on_slowpath_allocation_exit(current);
JRT_END
// Slow-path multi-dimensional array allocation for JVMCI-compiled code.
// `dims` holds `rank` dimension sizes; the allocated array is returned via
// JavaThread::_vm_result. With null_on_fail, an allocation failure results
// in NULL instead of a pending OutOfMemoryError (see RetryableAllocationMark).
JRT_ENTRY(void, JVMCIRuntime::new_multi_array_common(JavaThread* current, Klass* klass, int rank, jint* dims, bool null_on_fail))
assert(klass->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero");
Handle holder(current, klass->klass_holder()); // keep the klass alive
// Scope that converts a retryable OOME into a NULL result when null_on_fail.
RetryableAllocationMark ram(current, null_on_fail);
oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
// Return the new array to compiled code via TLS.
current->set_vm_result(obj);
JRT_END
// Create new instance (the receiver)
klass->check_valid_for_instantiation(false, CHECK);
if (null_on_fail) { if (!klass->is_initialized()) { // Cannot re-execute class initialization without side effects // so return without attempting the initialization return;
}
} else { // Make sure klass gets initialized
klass->initialize(CHECK);
}
// Enter this method from compiled code handler below. This is where we transition // to VM mode. This is done as a helper routine so that the method called directly // from compiled code does not have to transition to VM. This allows the entry // method to see if the nmethod that we have just looked up a handler for has // been deoptimized while we were in the vm. This simplifies the assembly code // cpu directories. // // We are entering here from exception stub (via the entry method below) // If there is a compiled exception handler in this method, we will continue there; // otherwise we will unwind the stack and continue at the caller of top frame method // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to // control the area where we can allow a safepoint. After we exit the safepoint area we can // check to see if the handler we are going to return is now in a nmethod that has // been deoptimized. If that is the case we return the deopt blob // unpack_with_exception entry instead. This makes life for the exception blob easier // because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, CompiledMethod*& cm)) // Reset method handle flag.
current->set_is_method_handle_return(false);
Handle exception(current, ex);
cm = CodeCache::find_compiled(pc);
assert(cm != NULL, "this is not a compiled method"); // Adjust the pc as needed/ if (cm->is_deopt_pc(pc)) {
RegisterMap map(current,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::include,
RegisterMap::WalkContinuation::skip);
frame exception_frame = current->last_frame().sender(&map); // if the frame isn't deopted then pc must not correspond to the caller of last_frame
assert(exception_frame.is_deoptimized_frame(), "must be deopted");
pc = exception_frame.pc();
}
assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
assert(oopDesc::is_oop(exception()), "just checking"); // Check that exception is a subclass of Throwable
assert(exception->is_a(vmClasses::Throwable_klass()), "Exception not subclass of Throwable");
// debugging support // tracing if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
stringStream tempst;
assert(cm->method() != NULL, "Unexpected null method()");
tempst.print("JVMCI compiled method <%s>\n" " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
cm->method()->print_value_string(), p2i(pc), p2i(current));
Exceptions::log_exception(exception, tempst.as_string());
} // for AbortVMOnException flag
Exceptions::debug_check_abort(exception);
// Check the stack guard pages and re-enable them if necessary and there is // enough space on the stack to do so. Use fast exceptions only if the guard // pages are enabled. bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();
if (JvmtiExport::can_post_on_exceptions()) { // To ensure correct notification of exception catches and throws // we have to deoptimize here. If we attempted to notify the // catches and throws during this exception lookup it's possible // we could deoptimize on the way out of the VM and end back in // the interpreter at the throw site. This would result in double // notifications since the interpreter would also notify about // these same catches and throws as it unwound the frame.
// We don't really want to deoptimize the nmethod itself since we // can actually continue in the exception handler ourselves but I // don't see an easy way to have the desired effect.
Deoptimization::deoptimize_frame(current, caller_frame.id(), Deoptimization::Reason_constraint);
assert(caller_is_deopted(), "Must be deoptimized");
// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions if (guard_pages_enabled) {
address fast_continuation = cm->handler_for_exception_and_pc(exception, pc); if (fast_continuation != NULL) { // Set flag if return address is a method handle call site.
current->set_is_method_handle_return(cm->is_method_handle_return(pc)); return fast_continuation;
}
}
// If the stack guard pages are enabled, check whether there is a handler in // the current method. Otherwise (guard pages disabled), force an unwind and // skip the exception cache update (i.e., just leave continuation==NULL).
address continuation = NULL; if (guard_pages_enabled) {
// New exception handling mechanism can support inlined methods // with exception handlers since the mappings are from PC to PC
// Clear out the exception oop and pc since looking up an // exception handler can cause class loading, which might throw an // exception and those fields are expected to be clear during // normal bytecode execution.
current->clear_exception_oop_and_pc();
bool recursive_exception = false;
continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false, recursive_exception); // If an exception was thrown during exception dispatch, the exception oop may have changed
current->set_exception_oop(exception());
current->set_exception_pc(pc);
// The exception cache is used only for non-implicit exceptions // Update the exception cache only when another exception did // occur during the computation of the compiled exception handler // (e.g., when loading the class of the catch type). // Checking for exception oop equality is not // sufficient because some exceptions are pre-allocated and reused. if (continuation != NULL && !recursive_exception && !SharedRuntime::deopt_blob()->contains(continuation)) {
cm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(cm->is_method_handle_return(pc));
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
p2i(current), p2i(continuation), p2i(pc));
}
return continuation;
JRT_END
// Enter this method from compiled code only if there is a Java exception handler // in the method handling the exception. // We are entering here from exception stub. We don't do a normal VM transition here. // We do it in a helper. This is so we can check to see if the nmethod we have just // searched for an exception handler has been deoptimized in the meantime.
// Entry from the exception stub: looks up the compiled exception handler
// for the pending exception without a normal VM transition, then checks
// whether the calling nmethod was deoptimized while we were in the VM.
address JVMCIRuntime::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  CompiledMethod* cm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, cm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the compiled method we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (cm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}
// Object.notify() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread* current, oopDesc* obj))
assert(current == JavaThread::current(), "pre-condition");
// Very few notify/notifyAll operations find any threads on the waitset, so // the dominant fast-path is to simply return. // Relatedly, it's critical that notify/notifyAll be fast in order to // reduce lock hold times. if (!SafepointSynchronize::is_synchronizing()) { if (ObjectSynchronizer::quick_notify(obj, current, false)) { returntrue;
}
} returnfalse; // caller must perform slow path
JRT_END
// Object.notifyAll() fast path, caller does slow path
JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread* current, oopDesc* obj))
assert(current == JavaThread::current(), "pre-condition");
if (!SafepointSynchronize::is_synchronizing() ) { if (ObjectSynchronizer::quick_notify(obj, current, true)) { returntrue;
}
} returnfalse; // caller must perform slow path
methodHandle mh(current, method); if (mh->size_of_parameters() > 1 && !mh->is_static()) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Invoked method must be static and take at most one argument");
}
// Installs the mirror oop into the nmethod's oop section at
// _nmethod_mirror_index. The slot must currently be NULL (use
// clear_nmethod_mirror to reset it) and new_mirror must be non-NULL.
void JVMCINMethodData::set_nmethod_mirror(nmethod* nm, oop new_mirror) {
assert(_nmethod_mirror_index != -1, "cannot set JVMCI mirror for nmethod");
oop* addr = nm->oop_addr_at(_nmethod_mirror_index);
assert(new_mirror != NULL, "use clear_nmethod_mirror to clear the mirror");
assert(*addr == NULL, "cannot overwrite non-null mirror");
*addr = new_mirror;
// Since we've patched some oops in the nmethod, // (re)register it with the heap.
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
Universe::heap()->register_nmethod(nm);
}
// Update the values in the mirror if it still refers to nm. // We cannot use JVMCIObject to wrap the mirror as this is called // during GC, forbidding the creation of JNIHandles.
JVMCIEnv* jvmciEnv = NULL;
nmethod* current = (nmethod*) HotSpotJVMCI::InstalledCode::address(jvmciEnv, nmethod_mirror); if (nm == current) { if (nm->is_unloading()) { // Break the link from the mirror to nm such that // future invocations via the mirror will result in // an InvalidInstalledCodeException.
HotSpotJVMCI::InstalledCode::set_address(jvmciEnv, nmethod_mirror, 0);
HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
HotSpotJVMCI::HotSpotInstalledCode::set_codeStart(jvmciEnv, nmethod_mirror, 0);
} elseif (nm->is_not_entrant()) { // Zero the entry point so any new invocation will fail but keep // the address link around that so that existing activations can // be deoptimized via the mirror (i.e. JVMCIEnv::invalidate_installed_code).
HotSpotJVMCI::InstalledCode::set_entryPoint(jvmciEnv, nmethod_mirror, 0);
HotSpotJVMCI::HotSpotInstalledCode::set_codeStart(jvmciEnv, nmethod_mirror, 0);
}
}
if (_nmethod_mirror_index != -1 && nm->is_unloading()) { // Drop the reference to the nmethod mirror object but don't clear the actual oop reference. Otherwise // it would appear that the nmethod didn't need to be unloaded in the first place.
_nmethod_mirror_index = -1;
}
}
// Handles to objects in the Hotspot heap. static OopStorage* object_handles() { return Universe::vm_global();
}
jlong JVMCIRuntime::make_oop_handle(const Handle& obj) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
assert(oopDesc::is_oop(obj()), "not an oop");
// Releases all remaining oop handles owned by this runtime during shutdown
// and returns the total number of handles released (including those already
// cleared by release_cleared_oop_handles).
int JVMCIRuntime::release_and_clear_oop_handles() {
  guarantee(_num_attached_threads == cannot_be_attached, "only call during JVMCI runtime shutdown");
  int released = release_cleared_oop_handles();
  if (_oop_handles.length() != 0) {
    for (int i = 0; i < _oop_handles.length(); i++) {
      oop* oop_ptr = _oop_handles.at(i);
      guarantee(oop_ptr != nullptr, "release_cleared_oop_handles left null entry in _oop_handles");
      guarantee(*oop_ptr != nullptr, "unexpected cleared handle");
      // Satisfy OopHandles::release precondition that all
      // handles being released are null.
      NativeAccess<>::oop_store(oop_ptr, (oop) NULL);
    }

    // Do the bulk release
    object_handles()->release(_oop_handles.adr_at(0), _oop_handles.length());
    released += _oop_handles.length();
  }
  _oop_handles.clear();
  return released;
}
// Swaps the elements in `array` at index `a` and index `b` staticvoid swap(GrowableArray<oop*>* array, int a, int b) {
oop* tmp = array->at(a);
array->at_put(a, array->at(b));
array->at_put(b, tmp);
}
int JVMCIRuntime::release_cleared_oop_handles() { // Despite this lock, it's possible for another thread // to clear a handle's referent concurrently (e.g., a thread // executing IndirectHotSpotObjectConstantImpl.clear()). // This is benign - it means there can still be cleared // handles in _oop_handles when this method returns.
MutexLocker ml(_lock);
int next = 0; if (_oop_handles.length() != 0) { // Key for _oop_handles contents in example below: // H: handle with non-null referent // h: handle with clear (i.e., null) referent // -: null entry
// Shuffle all handles with non-null referents to the front of the list // Example: Before: 0HHh-Hh- // After: HHHh--h- for (int i = 0; i < _oop_handles.length(); i++) {
oop* handle = _oop_handles.at(i); if (is_referent_non_null(handle)) { if (i != next && !is_referent_non_null(_oop_handles.at(next))) { // Swap elements at index `next` and `i`
swap(&_oop_handles, next, i);
}
next++;
}
}
// `next` is now the index of the first null handle or handle with a null referent int num_alive = next;
// Shuffle all null handles to the end of the list // Example: Before: HHHh--h- // After: HHHhh--- // num_alive: 3 for (int i = next; i < _oop_handles.length(); i++) {
oop* handle = _oop_handles.at(i); if (handle != nullptr) { if (i != next && _oop_handles.at(next) == nullptr) { // Swap elements at index `next` and `i`
swap(&_oop_handles, next, i);
}
next++;
}
} int to_release = next - num_alive;
// `next` is now the index of the first null handle // Example: to_release: 2
// Bulk release the handles with a null referent
object_handles()->release(_oop_handles.adr_at(num_alive), to_release);
// Truncate oop handles to only those with a non-null referent
JVMCI_event_1("compacted oop handles in JVMCI runtime %d from %d to %d", _id, _oop_handles.length(), num_alive);
_oop_handles.trunc_to(num_alive); // Example: HHH
// Function for redirecting shared library JavaVM output to tty staticvoid _log(constchar* buf, size_t count) {
tty->write((char*) buf, count);
}
// Function for redirecting shared library JavaVM fatal error data to a log file. // The log file is opened on first call to this function. staticvoid _fatal_log(constchar* buf, size_t count) {
JVMCI::fatal_log(buf, count);
}
// Function for shared library JavaVM to flush tty staticvoid _flush_log() {
tty->flush();
}
// Function for shared library JavaVM to exit HotSpot on a fatal error staticvoid _fatal() {
Thread* thread = Thread::current_or_null_safe(); if (thread != nullptr && thread->is_Java_thread()) {
JavaThread* jthread = (JavaThread*) thread;
JVMCIRuntime* runtime = jthread->libjvmci_runtime(); if (runtime != nullptr) { int javaVM_id = runtime->get_shared_library_javavm_id();
fatal("Fatal error in JVMCI shared library JavaVM[%d] owned by JVMCI runtime %d", javaVM_id, runtime->id());
}
}
intx current_thread_id = os::current_thread_id();
fatal("thread " INTX_FORMAT ": Fatal error in JVMCI shared library", current_thread_id);
}
// Selects a runtime for `thread` while the VM is shutting down, falling back
// to a lazily-created reserved shutdown runtime when none is attachable.
// Caller must hold JVMCI_lock.
JVMCIRuntime* JVMCIRuntime::select_runtime_in_shutdown(JavaThread* thread) {
  assert(JVMCI_lock->owner() == thread, "must be");
  // When shutting down, use the first available runtime.
  for (JVMCIRuntime* runtime = JVMCI::_compiler_runtimes; runtime != nullptr; runtime = runtime->_next) {
    if (runtime->_num_attached_threads != cannot_be_attached) {
      runtime->pre_attach_thread(thread);
      JVMCI_event_1("using pre-existing JVMCI runtime %d in shutdown", runtime->id());
      return runtime;
    }
  }
  // Lazily initialize JVMCI::_shutdown_compiler_runtime. Safe to
  // do here since JVMCI_lock is locked.
  if (JVMCI::_shutdown_compiler_runtime == nullptr) {
    JVMCI::_shutdown_compiler_runtime = new JVMCIRuntime(nullptr, -2, true);
  }
  JVMCIRuntime* runtime = JVMCI::_shutdown_compiler_runtime;
  JVMCI_event_1("using reserved shutdown JVMCI runtime %d", runtime->id());
  return runtime;
}
// Selects an existing runtime with spare capacity for `thread`, skipping
// `skip` if given. `count` (optional out) accumulates the number of runtimes
// visited. Returns nullptr when no suitable runtime exists.
// Caller must hold JVMCI_lock.
JVMCIRuntime* JVMCIRuntime::select_runtime(JavaThread* thread, JVMCIRuntime* skip, int* count) {
  assert(JVMCI_lock->owner() == thread, "must be");
  bool for_compile_broker = thread->is_Compiler_thread();
  for (JVMCIRuntime* runtime = JVMCI::_compiler_runtimes; runtime != nullptr; runtime = runtime->_next) {
    if (count != nullptr) {
      (*count)++;
    }
    if (for_compile_broker == runtime->_for_compile_broker) {
      // NOTE: this local deliberately shadows the `count` parameter.
      int count = runtime->_num_attached_threads;
      if (count == cannot_be_attached || runtime == skip) {
        // Cannot attach to rt
        continue;
      }
      // If selecting for repacking, ignore a runtime without an existing JavaVM
      if (skip != nullptr && !runtime->has_shared_library_javavm()) {
        continue;
      }

      // Select first runtime with sufficient capacity
      if (count < (int) JVMCIThreadsPerNativeLibraryRuntime) {
        runtime->pre_attach_thread(thread);
        return runtime;
      }
    }
  }
  return nullptr;
}
// Selects an attachable runtime for `thread`, creating a new one when none
// has capacity (non-singleton mode). In singleton mode, waits until the
// single runtime becomes attachable. Caller must hold JVMCI_lock.
JVMCIRuntime* JVMCIRuntime::select_or_create_runtime(JavaThread* thread) {
  assert(JVMCI_lock->owner() == thread, "must be");
  int id = 0;
  JVMCIRuntime* runtime;
  if (JVMCI::using_singleton_shared_library_runtime()) {
    runtime = JVMCI::_compiler_runtimes;
    guarantee(runtime != nullptr, "must be");
    while (runtime->_num_attached_threads == cannot_be_attached) {
      // Since there is only a singleton JVMCIRuntime, we
      // need to wait for it to be available for attaching.
      JVMCI_lock->wait();
    }
    runtime->pre_attach_thread(thread);
  } else {
    runtime = select_runtime(thread, nullptr, &id);
  }
  if (runtime == nullptr) {
    runtime = new JVMCIRuntime(JVMCI::_compiler_runtimes, id, thread->is_Compiler_thread());
    JVMCI::_compiler_runtimes = runtime;
    runtime->pre_attach_thread(thread);
  }
  return runtime;
}
// Moves `thread` to another runtime with existing capacity when this runtime
// only has this one thread attached, to reduce the number of live JavaVMs.
// No-op in singleton mode or during shutdown.
void JVMCIRuntime::repack(JavaThread* thread) {
  JVMCIRuntime* new_runtime = nullptr;
  {
    MutexLocker locker(JVMCI_lock);
    if (JVMCI::using_singleton_shared_library_runtime() || _num_attached_threads != 1 || JVMCI::in_shutdown()) {
      return;
    }
    new_runtime = select_runtime(thread, this, nullptr);
  }
  if (new_runtime != nullptr) {
    JVMCI_event_1("Moving thread from JVMCI runtime %d to JVMCI runtime %d (%d attached)", _id, new_runtime->_id, new_runtime->_num_attached_threads - 1);
    detach_thread(thread, "moving thread to another JVMCI runtime");
    new_runtime->attach_thread(thread);
  }
}
bool JVMCIRuntime::detach_thread(JavaThread* thread, constchar* reason, bool can_destroy_javavm) { if (this == JVMCI::_shutdown_compiler_runtime || JVMCI::in_shutdown()) { // Do minimal work when shutting down JVMCI
thread->set_libjvmci_runtime(nullptr); returnfalse;
} bool should_shutdown; bool destroyed_javavm = false;
{
MutexLocker locker(JVMCI_lock);
_num_attached_threads--;
JVMCI_event_1("detaching from JVMCI runtime %d: %s (%d other threads still attached)", _id, reason, _num_attached_threads);
should_shutdown = _num_attached_threads == 0 && !JVMCI::in_shutdown(); if (should_shutdown && !can_destroy_javavm) { // If it's not possible to destroy the JavaVM on this thread then the VM must // not be shutdown. This can happen when a shared library thread is the last // thread to detach from a shared library JavaVM (e.g. GraalServiceThread).
JVMCI_event_1("Cancelled shut down of JVMCI runtime %d", _id);
should_shutdown = false;
} if (should_shutdown) { // Prevent other threads from attaching to this runtime // while it is shutting down and destroying its JavaVM
_num_attached_threads = cannot_be_attached;
}
} if (should_shutdown) { // Release the JavaVM resources associated with this // runtime once there are no threads attached to it.
shutdown(); if (can_destroy_javavm) {
destroyed_javavm = destroy_shared_library_javavm(); if (destroyed_javavm) { // Can release all handles now that there's no code executing // that could be using them. Handles for the Java JVMCI runtime // are never released as we cannot guarantee all compiler threads // using it have been stopped. int released = release_and_clear_oop_handles();
JVMCI_event_1("releasing handles for JVMCI runtime %d: oop handles=%d, metadata handles={total=%d, live=%d, blocks=%d}",
_id,
released,
_metadata_handles->num_handles(),
_metadata_handles->num_handles() - _metadata_handles->num_free_handles(),
_metadata_handles->num_blocks());
// No need to acquire _lock since this is the only thread accessing this runtime
_metadata_handles->clear();
}
} // Allow other threads to attach to this runtime now
MutexLocker locker(JVMCI_lock);
_num_attached_threads = 0; if (JVMCI::using_singleton_shared_library_runtime()) { // Notify any thread waiting to attach to the // singleton JVMCIRuntime
JVMCI_lock->notify();
}
}
thread->set_libjvmci_runtime(nullptr);
JVMCI_event_1("detached from JVMCI runtime %d", _id); return destroyed_javavm;
}
// Protocol: JVMCI shared library JavaVM should support a non-standard "_javavm_id" // option whose extraInfo info field is a pointer to which a unique id for the // JavaVM should be written.
options[0].optionString = (char*) "_javavm_id";
options[0].extraInfo = &javaVM_id;
void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(JVMCI_TRAPS) { if (is_HotSpotJVMCIRuntime_initialized()) { if (JVMCIENV->is_hotspot() && UseJVMCINativeLibrary) {
JVMCI_THROW_MSG(InternalError, "JVMCI has already been enabled in the JVMCI shared library");
}
}
initialize(JVMCI_CHECK);
// This should only be called in the context of the JVMCI class being initialized
JVMCIObject result = JVMCIENV->call_HotSpotJVMCIRuntime_runtime(JVMCI_CHECK);
result = JVMCIENV->make_global(result);
OrderAccess::storestore(); // Ensure handle is fully constructed before publishing
_HotSpotJVMCIRuntime_instance = result;
class JavaVMRefsInitialization: public StackObj {
JVMCIRuntime::InitState *_state; int _id; public:
JavaVMRefsInitialization(JVMCIRuntime::InitState *state, int id) {
_state = state;
_id = id; // All classes, methods and fields in the JVMCI shared library // are in the read-only part of the image. As such, these // values (and any global handle derived from them via NewGlobalRef) // are the same for all JavaVM instances created in the // shared library which means they only need to be initialized // once. In non-product mode, we check this invariant. // See com.oracle.svm.jni.JNIImageHeapHandles. // The same is true for Klass* and field offsets in HotSpotJVMCI. if (*state == JVMCIRuntime::uninitialized DEBUG_ONLY( || true)) {
*state = JVMCIRuntime::being_initialized;
JVMCI_event_1("initializing JavaVM references in JVMCI runtime %d", id);
} else { while (*state != JVMCIRuntime::fully_initialized) {
JVMCI_event_1("waiting for JavaVM references initialization in JVMCI runtime %d", id);
JVMCI_lock->wait();
}
JVMCI_event_1("done waiting for JavaVM references initialization in JVMCI runtime %d", id);
}
}
JNIJVMCI::initialize_ids(jni.env()); if (jni()->ExceptionCheck()) {
jni()->ExceptionDescribe();
fatal("JNI exception during init");
} // _lock is re-locked at this point
}
}
}
JVMCIObject JVMCIRuntime::create_jvmci_primitive_type(BasicType type, JVMCI_TRAPS) {
JavaThread* THREAD = JavaThread::current(); // For exception macros. // These primitive types are long lived and are created before the runtime is fully set up // so skip registering them for scanning.
JVMCIObject mirror = JVMCIENV->get_object_constant(java_lang_Class::primitive_mirror(type), false, true); if (JVMCIENV->is_hotspot()) {
JavaValue result(T_OBJECT);
JavaCallArguments args;
args.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(mirror)));
args.push_int(type2char(type));
JavaCalls::call_static(&result, HotSpotJVMCI::HotSpotResolvedPrimitiveType::klass(), vmSymbols::fromMetaspace_name(), vmSymbols::primitive_fromMetaspace_signature(), &args, CHECK_(JVMCIObject()));
// Ensure _non_oop_bits is initialized
Universe::non_oop_word();
if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods, CompilerToVM::methods_count())) { if (!env->ExceptionCheck()) { for (int i = 0; i < CompilerToVM::methods_count(); i++) { if (JNI_OK != env->RegisterNatives(c2vmClass, CompilerToVM::methods + i, 1)) {
guarantee(false, "Error registering JNI method %s%s", CompilerToVM::methods[i].name, CompilerToVM::methods[i].signature); break;
}
}
} else {
env->ExceptionDescribe();
}
guarantee(false, "Failed registering CompilerToVM native methods");
}
}
JVM_END
// Invokes HotSpotJVMCIRuntime.shutdown() for this runtime's Java-side
// runtime object and, when no other threads are attached, resets the
// runtime back to its uninitialized state.
void JVMCIRuntime::shutdown() {
  if (_HotSpotJVMCIRuntime_instance.is_non_null()) {
    JVMCI_event_1("shutting down HotSpotJVMCIRuntime for JVMCI runtime %d", _id);
    JVMCIEnv __stack_jvmci_env__(JavaThread::current(), _HotSpotJVMCIRuntime_instance.is_hotspot(), __FILE__, __LINE__);
    JVMCIEnv* JVMCIENV = &__stack_jvmci_env__;
    JVMCIENV->call_HotSpotJVMCIRuntime_shutdown(_HotSpotJVMCIRuntime_instance);
    if (_num_attached_threads == cannot_be_attached) {
      // Only when no other threads are attached to this runtime
      // is it safe to reset these fields.
      _HotSpotJVMCIRuntime_instance = JVMCIObject();
      _init_state = uninitialized;
      JVMCI_event_1("shut down JVMCI runtime %d", _id);
    }
  }
}
// Destroys the shared library JavaVM owned by this runtime, if any.
// Returns whether a JavaVM was destroyed (even if DestroyJavaVM reported
// a non-zero result, in which case a warning is issued).
bool JVMCIRuntime::destroy_shared_library_javavm() {
  guarantee(_num_attached_threads == cannot_be_attached,
            "cannot destroy JavaVM for JVMCI runtime %d with %d attached threads", _id, _num_attached_threads);
  JavaVM* javaVM;
  int javaVM_id = _shared_library_javavm_id;
  {
    // Exactly one thread can destroy the JavaVM
    // and release the handle to it.
    MutexLocker only_one(_lock);
    javaVM = _shared_library_javavm;
    if (javaVM != nullptr) {
      _shared_library_javavm = nullptr;
      _shared_library_javavm_id = 0;
    }
  }
  if (javaVM != nullptr) {
    int result;
    {
      // Must transition into native before calling into libjvmci
      ThreadToNativeFromVM ttnfv(JavaThread::current());
      result = javaVM->DestroyJavaVM();
    }
    if (result == JNI_OK) {
      JVMCI_event_1("destroyed JavaVM[%d]@" PTR_FORMAT " for JVMCI runtime %d", javaVM_id, p2i(javaVM), _id);
    } else {
      warning("Non-zero result (%d) when calling JNI_DestroyJavaVM on JavaVM[%d]@" PTR_FORMAT, result, javaVM_id, p2i(javaVM));
    }
    return true;
  }
  return false;
}
// Clear and ignore any exceptions raised during printing
CLEAR_PENDING_EXCEPTION; if (!clear) {
THREAD->set_pending_exception(exception(), exception_file, exception_line);
}
}
}
staticvolatileint report_error = 0; if (!report_error && Atomic::cmpxchg(&report_error, 0, 1) == 0) { // Only report an error once
tty->print_raw_cr(message); if (JVMCIENV != NULL) {
JVMCIENV->describe_pending_exception(true);
} else {
describe_pending_hotspot_exception(THREAD, true);
}
} else { // Allow error reporting thread to print the stack trace.
THREAD->sleep(200);
}
fatal("Fatal exception in JVMCI: %s", message);
}
// ------------------------------------------------------------------ // Note: the logic of this method should mirror the logic of // constantPoolOopDesc::verify_constant_pool_resolve. bool JVMCIRuntime::check_klass_accessibility(Klass* accessing_klass, Klass* resolved_klass) { if (accessing_klass->is_objArray_klass()) {
accessing_klass = ObjArrayKlass::cast(accessing_klass)->bottom_klass();
} if (!accessing_klass->is_instance_klass()) { returntrue;
}
if (resolved_klass->is_objArray_klass()) { // Find the element klass, if this is an array.
resolved_klass = ObjArrayKlass::cast(resolved_klass)->bottom_klass();
} if (resolved_klass->is_instance_klass()) {
Reflection::VerifyClassAccessResults result =
Reflection::verify_class_access(accessing_klass, InstanceKlass::cast(resolved_klass), true); return result == Reflection::ACCESS_OK;
} returntrue;
}
// Now we need to check the SystemDictionary if (sym->char_at(0) == JVM_SIGNATURE_CLASS &&
sym->char_at(sym->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) { // This is a name from a signature. Strip off the trimmings. // Call recursive to keep scope of strippedsym.
TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-2); return get_klass_by_name_impl(accessing_klass, cpool, strippedsym, require_local);
}
// If we fail to find an array klass, look again for its element type. // The element type may be available either locally or via constraints. // In either case, if we can find the element type in the system dictionary, // we must build an array type around it. The CI requires array klasses // to be loaded if their element klasses are loaded, except when memory // is exhausted. if (sym->char_at(0) == JVM_SIGNATURE_ARRAY &&
(sym->char_at(1) == JVM_SIGNATURE_ARRAY || sym->char_at(1) == JVM_SIGNATURE_CLASS)) { // We have an unloaded array. // Build it on the fly if the element class exists.
TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
sym->utf8_length()-1);
// Get element Klass recursively.
Klass* elem_klass =
get_klass_by_name_impl(accessing_klass,
cpool,
elem_sym,
require_local); if (elem_klass != NULL) { // Now make an array for it return elem_klass->array_klass(THREAD);
}
}
if (found_klass == NULL && !cpool.is_null() && cpool->has_preresolution()) { // Look inside the constant pool for pre-resolved class entries. for (int i = cpool->length() - 1; i >= 1; i--) { if (cpool->tag_at(i).is_klass()) {
Klass* kls = cpool->resolved_klass_at(i);
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.55 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.