/* * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// put OS-includes here # include <sys/types.h> # include <sys/mman.h> # include <pthread.h> # include <signal.h> # include <errno.h> # include <dlfcn.h> # include <stdlib.h> # include <stdio.h> # include <unistd.h> # include <sys/resource.h> # include <sys/stat.h> # include <sys/time.h> # include <sys/utsname.h> # include <sys/socket.h> # include <sys/wait.h> # include <pwd.h> # include <poll.h> #ifndef __OpenBSD__ # include <ucontext.h> #endif
#if !defined(__APPLE__) && !defined(__NetBSD__) # include <pthread_np.h> #endif
// needed by current_stack_region() workaround for Mavericks #ifdefined(__APPLE__) # include <errno.h> # include <sys/types.h> # include <sys/sysctl.h> # define DEFAULT_MAIN_THREAD_STACK_PAGES 2048 # define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13 #endif
#ifdef __APPLE__ # if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5) // 10.5 UNIX03 member name prefixes #define DU3_PREFIX(s, m) __ ## s.__ ## m # else #define DU3_PREFIX(s, m) s ## . ## m # endif
char* os::non_memory_address_word() { // Must never look like an address returned by reserve_memory, // even in its subfields (as defined by the CPU immediate fields, // if the CPU splits constants across multiple instructions).
// Rebuild the frame that compiled code was executing when the signal hit,
// given the raw ucontext captured by the signal handler.
frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
  const ucontext_t* uc = (const ucontext_t*)ucVoid;
  frame fr = os::fetch_frame_from_context(uc);
  // in compiled code, the stack banging is performed just after the return pc
  // has been pushed on the stack
  return frame(fr.sp() + 1, fr.fp(), (address)*(fr.sp()));
}
// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack. It may get
// turned off by -fomit-frame-pointer,
// Step one frame up a C stack: construct the caller's frame from the
// callee's saved sp / fp / return-pc triple.
frame os::get_sender_for_C_frame(frame* fr) {
  intptr_t* sender_sp = fr->sender_sp();
  intptr_t* sender_fp = fr->link();
  address   sender_pc = fr->sender_pc();
  return frame(sender_sp, sender_fp, sender_pc);
}
// Read the current frame pointer from the FP register and return the frame
// pointer of the caller (see inlining note below).
intptr_t* _get_previous_fp() {
#if defined(__clang__) || defined(__llvm__)
  intptr_t **ebp;
  __asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
#else
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  // ebp is for this frame (_get_previous_fp). We want the ebp for the
  // caller of os::current_frame*(), so go up two frames. However, for
  // optimized builds, _get_previous_fp() will be inlined, so only go
  // up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)ebp;
#else
  return *ebp;
#endif
}
// Build a frame describing this very call, then step out of it so the
// result describes our caller; returns an empty frame when the stack is
// not walkable from here.
frame os::current_frame() {
  intptr_t* prev_fp = _get_previous_fp();
  frame self_frame((intptr_t*)os::current_stack_pointer(),
                   (intptr_t*)prev_fp,
                   CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&self_frame)) {
    // stack is not walkable
    return frame();
  }
  return os::get_sender_for_C_frame(&self_frame);
}
// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE   // hardware trap number of the page-fault (#PF) exception
};
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) { // decide if this trap can be handled by a stub
address stub = NULL;
address pc = NULL;
//%note os_trap_1 if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Posix::ucontext_get_pc(uc);
// Handle ALL stack overflow variations here if (sig == SIGSEGV || sig == SIGBUS) {
address addr = (address) info->si_addr;
// check if fault address is within thread stack if (thread->is_in_full_stack(addr)) { // stack overflow if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) { returntrue; // continue
}
}
}
if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr(pc)) { // Verify that OS save/restore AVX registers.
stub = VM_Version::cpuinfo_cont_addr();
}
// We test if stub is already set (by the stack overflow code // above) so it is not overwritten by the code that follows. This // check is not required on other platforms, because on other // platforms we check for SIGSEGV only or SIGBUS only, where here // we have to check for both SIGSEGV and SIGBUS. if (thread->thread_state() == _thread_in_Java && stub == NULL) { // Java thread running in Java code => find exception handler if any // a fault inside compiled code, the interpreter, or a stub
if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {
stub = SharedRuntime::get_poll_stub(pc); #ifdefined(__APPLE__) // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions. // 64-bit Darwin may also use a SIGBUS (seen with compressed oops). // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from // being called, so only do so if the implicit NULL check is not necessary.
} elseif (sig == SIGBUS && !MacroAssembler::uses_implicit_null_check(info->si_addr)) { #else
} elseif (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { #endif // BugId 4454115: A read from a MappedByteBuffer can fault // here if the underlying file has been truncated. // Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc); if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc); if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
} else
#ifdef AMD64 if (sig == SIGFPE &&
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV // Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0" // instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) {
stub =
SharedRuntime::
continuation_for_implicit_exception(thread,
pc,
SharedRuntime::
IMPLICIT_DIVIDE_BY_ZERO); #ifdef __APPLE__
} elseif (sig == SIGFPE && info->si_code == FPE_NOOP) { int op = pc[0];
// Skip REX if ((pc[0] & 0xf0) == 0x40) {
op = pc[1];
} else {
op = pc[0];
}
// Check for IDIV if (op == 0xF7) {
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime:: IMPLICIT_DIVIDE_BY_ZERO);
} else { // TODO: handle more cases if we are using other x86 instructions // that can generate SIGFPE signal.
tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
fatal("please update this code.");
} #endif/* __APPLE__ */
#else if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) { // HACK: si_code does not work on bsd 2.2.12-20!!! int op = pc[0]; if (op == 0xDB) { // FIST // TODO: The encoding of D2I in x86_32.ad can cause an exception // prior to the fist instruction if there was an invalid operation // pending. We want to dismiss that exception. From the win_32 // side it also seems that if it really was the fist causing // the exception that we do the d2i by hand with different // rounding. Seems kind of weird. // NOTE: that we take the exception at the NEXT floating point instruction.
assert(pc[0] == 0xDB, "not a FIST opcode");
assert(pc[1] == 0x14, "not a FIST opcode");
assert(pc[2] == 0x24, "not a FIST opcode"); returntrue;
} elseif (op == 0xF7) { // IDIV
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
} else { // TODO: handle more cases if we are using other x86 instructions // that can generate SIGFPE signal on bsd.
tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
fatal("please update this code.");
} #endif// AMD64
} elseif ((sig == SIGSEGV || sig == SIGBUS) &&
MacroAssembler::uses_implicit_null_check(info->si_addr)) { // Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} elseif ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = Assembler::locate_next_instruction(pc); if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in // and the heap gets shrunk before the field access. if ((sig == SIGSEGV) || (sig == SIGBUS)) {
address addr = JNI_FastGetField::find_slowcase_pc(pc); if (addr != (address)-1) {
stub = addr;
}
}
}
#ifndef AMD64 // Execution protection violation // // This should be kept as the last step in the triage. We don't // have a dedicated trap number for a no-execute fault, so be // conservative and allow other handlers the first shot. // // Note: We don't test that info->si_code == SEGV_ACCERR here. // this si_code is so generic that it is almost meaningless; and // the si_code for this condition may change in the future. // Furthermore, a false-positive should be harmless. if (UnguardOnExecutionViolation > 0 &&
stub == NULL &&
(sig == SIGSEGV || sig == SIGBUS) &&
uc->context_trapno == trap_page_fault) { int page_size = os::vm_page_size();
address addr = (address) info->si_addr;
address pc = os::Posix::ucontext_get_pc(uc); // Make sure the pc and the faulting address are sane. // // If an instruction spans a page boundary, and the page containing // the beginning of the instruction is executable but the following // page is not, the pc and the faulting address might be slightly // different - we still want to unguard the 2nd page in this case. // // 15 bytes seems to be a (very) safe value for max instruction size. bool pc_is_near_addr =
(pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); bool instr_spans_page_boundary =
(align_down((intptr_t) pc ^ (intptr_t) addr,
(intptr_t) page_size) > 0);
// In conservative mode, don't unguard unless the address is in the VM if (addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
// Set memory to RWX and retry
address page_start = align_down(addr, page_size); bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);
// Set last_addr so if we fault again at the same address, we don't end // up in an endless loop. // // There are two potential complications here. Two threads trapping at // the same address at the same time could cause one of the threads to // think it already unguarded, and abort the VM. Likely very rare. // // The other race involves two threads alternately trapping at // different addresses and failing to unguard the page, resulting in // an endless loop. This condition is probably even more unlikely than // the first. // // Although both cases could be avoided by using locks or thread local // last_addr, these solutions are unnecessary complication: this // handler is a best-effort safety net, not a complete solution. It is // disabled by default and should only be used as a workaround in case // we missed any no-execute-unsafe VM code.
last_addr = addr;
}
}
} #endif// !AMD64
if (stub != NULL) { // save all thread context in case we need to restore it if (thread != NULL) thread->set_saved_exception_pc(pc);
// From solaris_i486.s ported to bsd_i486.s
extern "C" void fixcw();

// Set up the x87 FPU control word for this thread.
void os::Bsd::init_thread_fpu_state(void) {
#ifndef AMD64
  // Set fpu to 53 bit precision. This happens too early to use a stub.
  fixcw();
#endif // !AMD64
}
// Query the CPU microcode revision via sysctl; returns 0 when the value
// is unavailable or has an unexpected size.
juint os::cpu_microcode_revision() {
  char buf[8];
  size_t len = sizeof(buf);
  if (sysctlbyname("machdep.cpu.microcode_version", buf, &len, NULL, 0) != 0) {
    return 0;
  }
  juint revision = 0;
  if (len == 4) {
    revision = *((juint*)buf);
  } else if (len == 8) {
    revision = *((juint*)buf + 1); // upper 32-bits
  }
  return revision;
}
// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  Java thread created by VM does not have glibc
//    |      glibc guard page  | -  guard, attached Java thread usually has
//    |                        |/  1 glibc guard page.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red, yellow and reserved pages
//    |                        |/
//    +------------------------+ StackOverflow::stack_reserved_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |  glibc guard page      | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack()
staticvoid current_stack_region(address * bottom, size_t * size) { #ifdef __APPLE__
pthread_t self = pthread_self(); void *stacktop = pthread_get_stackaddr_np(self);
*size = pthread_get_stacksize_np(self); // workaround for OS X 10.9.0 (Mavericks) // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages if (pthread_main_np() == 1) { // At least on Mac OS 10.12 we have observed stack sizes not aligned // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the // shell). Apparently Mac OS actually rounds upwards to next multiple of page size, // however, we round downwards here to be on the safe side.
*size = align_down(*size, getpagesize());
if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { char kern_osrelease[256];
size_t kern_osrelease_size = sizeof(kern_osrelease); int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); if (ret == 0) { // get the major number, atoi will ignore the minor amd micro portions of the version string if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
*size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
}
}
}
}
*bottom = (address) stacktop - *size; #elifdefined(__OpenBSD__)
stack_t ss; int rslt = pthread_stackseg_np(pthread_self(), &ss);
if (rslt != 0)
fatal("pthread_stackseg_np failed with error = %d", rslt);
// Note: it may be unsafe to inspect memory near pc. For example, pc may // point to garbage if entry point in an nmethod is corrupted. Leave // this at the end, and hope for the best.
address pc = os::Posix::ucontext_get_pc(uc);
print_instructions(st, pc, sizeof(char));
st->cr();
}
st->print_cr("Register to memory mapping:");
st->cr();
// this is horrendously verbose but the layout of the registers in the // context does not match how we defined our abstract Register set, so // we can't just iterate through the gregs area
// this is only for the "general purpose" registers