/* * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// put OS-includes here # include <sys/types.h> # include <sys/mman.h> # include <sys/stat.h> # include <sys/select.h> # include <pthread.h> # include <signal.h> # include <endian.h> # include <errno.h> # include <dlfcn.h> # include <stdio.h> # include <unistd.h> # include <sys/resource.h> # include <pthread.h> # include <sys/stat.h> # include <sys/time.h> # include <sys/times.h> # include <sys/utsname.h> # include <sys/socket.h> # include <pwd.h> # include <poll.h> # include <fcntl.h> # include <string.h> # include <syscall.h> # include <sys/sysinfo.h> # include <sys/ipc.h> # include <sys/shm.h> # include <link.h> # include <stdint.h> # include <inttypes.h> # include <sys/ioctl.h> # include <linux/elf-em.h> #ifdef __GLIBC__ # include <malloc.h> #endif
// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code
// calling getrusage() is prepared to handle the associated failure.
// (Restored: the directives below had been swallowed into a merged "//"
// comment line.)
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD (1) /* only the calling thread */
#endif

// NOTE(review): assumes K (1024) is defined by a shared HotSpot header - confirm.
#define MAX_PATH (2 * K)

#define MAX_SECS 100000000

// For timer info max values which include all bits.
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
#ifdef MUSL_LIBC
// dlvsym is not a part of POSIX and musl libc doesn't implement it.
// Fall back to dlsym(), which resolves the default (latest) version of the
// symbol; the requested 'version' is ignored.
static void *dlvsym(void *handle, const char *symbol, const char *version) {
  // load the latest version of symbol
  return dlsym(handle, symbol);
}
#endif
#ifdef __GLIBC__
// We want to be buildable and runnable on older and newer glibcs, so resolve
// both mallinfo and mallinfo2 dynamically.
// Layout-compatible mirror of glibc's historical "struct mallinfo" (ten int
// fields); see mallinfo(3) for the field semantics.
struct old_mallinfo {
  int arena;    // non-mmapped space allocated from system
  int ordblks;  // number of free chunks
  int smblks;   // number of free fastbin blocks
  int hblks;    // number of mmapped regions
  int hblkhd;   // space allocated in mmapped regions
  int usmblks;  // unused (always 0 in modern glibc)
  int fsmblks;  // space in freed fastbin blocks
  int uordblks; // total allocated space
  int fordblks; // total free space
  int keepcost; // top-most, releasable (via malloc_trim) space
};
typedef struct old_mallinfo (*mallinfo_func_t)(void);
// Resolved lazily via dlsym/dlvsym; NULL until (and unless) resolution succeeds.
static mallinfo_func_t g_mallinfo = NULL;
// If the VM might have been created on the primordial thread, we need to resolve the // primordial thread stack bounds and check if the current thread might be the // primordial thread in places. If we know that the primordial thread is never used, // such as when the VM was created by one of the standard java launchers, we can // avoid this staticbool suppress_primordial_thread_resolution = false;
// Advance the read position of 'f' to just past the next '\n' (or to EOF,
// whichever comes first). Used when scanning line-oriented /proc files.
static void next_line(FILE *f) {
  int c;
  do {
    c = fgetc(f);
  } while (c != '\n' && c != EOF);
}
// Reads one "cpu" line of /proc/stat into *pticks for the given logical CPU
// (which_logical_cpu == -1 selects the aggregate first line). Returns false
// if /proc/stat cannot be opened.
// NOTE(review): this definition is truncated in this chunk - the fscanf of
// the tick fields, the use of required_tickinfo_count, and the fclose are
// not visible here. "constint" and "returnfalse" below are lost-whitespace
// merges of "const int" / "return false"; restore before compiling.
bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
FILE* fh;
uint64_t userTicks, niceTicks, systemTicks, idleTicks; // since at least kernel 2.6 : iowait: time waiting for I/O to complete // irq: time servicing interrupts; softirq: time servicing softirqs
uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0; // steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
uint64_t stealTicks = 0; // guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the // control of the Linux kernel
uint64_t guestNiceTicks = 0; int logical_cpu = -1; constint required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5; int n;
// Zero the output so missing fields read as 0 on the failure path.
memset(pticks, 0, sizeof(CPUPerfTicks));
if ((fh = os::fopen("/proc/stat", "r")) == NULL) { returnfalse;
}
// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
// (Fixed: the return statement had been swallowed into the trailing
// comment of a merged line.)
pid_t os::Linux::gettid() {
  int rslt = syscall(SYS_gettid);
  assert(rslt != -1, "must be."); // old linuxthreads implementation?
  return (pid_t)rslt;
}
// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
julong os::Linux::host_swap() {
  struct sysinfo si;
  sysinfo(&si);
  // sysinfo(2) reports sizes in units of si.mem_unit bytes; scale so the
  // result really is in bytes (mem_unit can be > 1 on some kernels).
  return (julong)si.totalswap * si.mem_unit;
}
// Most versions of linux have a bug where the number of processors are // determined by looking at the /proc file system. In a chroot environment, // the system call returns 1. staticbool unsafe_chroot_detected = false; staticconstchar *unstable_chroot_error = "/proc file system not found.\n" "Java may be unstable running multithreaded in a chroot " "environment on Linux when /proc filesystem is not mounted.";
// Determines JAVA_HOME, the dll dir, the boot class path and the default
// native library search path from the location of libjvm.so.
// NOTE(review): this chunk is garbled by lost line breaks: several #define
// directives (DEFAULT_LIBPATH, SYS_EXT_DIR, EXTENSIONS_DIR) and the
// "char *buf = NEW_C_HEAP_ARRAY(...)" declaration have been swallowed into
// trailing "//" comments of merged lines, the code that fills 'buf' with the
// libjvm.so path is missing, and 'pslash' is never declared here. Restore
// the original line structure before compiling.
void os::init_system_properties_values() { // The next steps are taken in the product version: // // Obtain the JAVA_HOME value from the location of libjvm.so. // This library should be located at: // <JAVA_HOME>/lib/{client|server}/libjvm.so. // // If "/jre/lib/" appears at the right place in the path, then we // assume libjvm.so is installed in a JDK and we use this path. // // Otherwise exit with message: "Could not create the Java virtual machine." // // The following extra steps are taken in the debugging version: // // If "/jre/lib/" does NOT appear at the right place in the path // instead of exit check for $JAVA_HOME environment variable. // // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, // then we append a fake suffix "hotspot/libjvm.so" to this path so // it looks like libjvm.so is installed there // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so. // // Otherwise exit. // // Important note: if the location of libjvm.so changes this // code needs to be changed accordingly.
// See ld(1): // The linker uses the following search paths to locate required // shared libraries: // 1: ... // ... // 7: The default directories, normally /lib and /usr/lib. #ifndef OVERRIDE_LIBPATH #ifdefined(_LP64) #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib" #else #define DEFAULT_LIBPATH "/lib:/usr/lib" #endif #else #define DEFAULT_LIBPATH OVERRIDE_LIBPATH #endif
// Base path of extensions installed on the system. #define SYS_EXT_DIR "/usr/java/packages" #define EXTENSIONS_DIR "/lib/ext"
// Buffer that fits several sprintfs. // Note that the space for the colon and the trailing null are provided // by the nulls included by the sizeof operator. const size_t bufsize =
MAX2((size_t)MAXPATHLEN, // For dll_dir & friends.
(size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
// NOTE(review): 'buf' must contain the full libjvm.so path at this point
// (normally filled by os::jvm_path, not visible here). Each strrchr below
// strips one trailing path component: /libjvm.so, then
// /{client|server|hotspot}, then /lib.
// Found the full path to libjvm.so. // Now cut the path to <java_home>/jre if we can.
pslash = strrchr(buf, '/'); if (pslash != NULL) {
*pslash = '\0'; // Get rid of /libjvm.so.
}
pslash = strrchr(buf, '/'); if (pslash != NULL) {
*pslash = '\0'; // Get rid of /{client|server|hotspot}.
}
Arguments::set_dll_dir(buf);
if (pslash != NULL) {
pslash = strrchr(buf, '/'); if (pslash != NULL) {
*pslash = '\0'; // Get rid of /lib.
}
}
Arguments::set_java_home(buf); if (!set_boot_path('/', ':')) {
vm_exit_during_initialization("Failed setting boot class path.", NULL);
}
}
// Where to look for native libraries. // // Note: Due to a legacy implementation, most of the library path // is set in the launcher. This was to accommodate linking restrictions // on legacy Linux implementations (which are no longer supported). // Eventually, all the library path setting will be done here. // // However, to prevent the proliferation of improperly built native // libraries, the new path component /usr/java/packages is added here. // Eventually, all the library path setting will be done here.
// NOTE(review): "constchar" below is a lost-whitespace merge of "const char".
{ // Get the user setting of LD_LIBRARY_PATH, and prepended it. It // should always exist (until the legacy problem cited above is // addressed). constchar *v = ::getenv("LD_LIBRARY_PATH"); constchar *v_colon = ":"; if (v == NULL) { v = ""; v_colon = ""; } // That's +1 for the colon and +1 for the trailing '\0'. char *ld_library_path = NEW_C_HEAP_ARRAY(char,
strlen(v) + 1 + sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1,
mtInternal);
sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon);
Arguments::set_library_path(ld_library_path);
FREE_C_HEAP_ARRAY(char, ld_library_path);
}
// Saves the glibc and pthread version strings; requires glibc >= 2.3.2 so
// that the _CS_GNU_LIBC_VERSION / _CS_GNU_LIBPTHREAD_VERSION confstr() keys
// exist.
// NOTE(review): this definition is truncated here - the confstr() calls and
// the closing brace are not visible. Also '#error"glibc' has lost the space
// after #error in a bad merge; restore before compiling.
void os::Linux::libpthread_init() { // Save glibc and pthread version strings. #if !defined(_CS_GNU_LIBC_VERSION) || \
!defined(_CS_GNU_LIBPTHREAD_VERSION) #error"glibc too old (< 2.3.2)" #endif
// NOTE(review): the code below is the interior of a stack-expansion helper
// (historically _expand_stack_to(address bottom)); its signature and the
// declarations of 'bottom', 'sp', 'size' and 'p' are not visible in this
// chunk. "volatilechar" is a lost-whitespace merge of "volatile char".
// It forces the kernel to materialize a MAP_GROWSDOWN stack down to
// 'bottom' by alloca'ing below it and touching the memory.
// os::Linux::manually_expand_stack() takes care of expanding the thread // stack. Note that this is normally not needed: pthread stacks allocate // thread stack using mmap() without MAP_NORESERVE, so the stack is already // committed. Therefore it is not necessary to expand the stack manually. // // Manually expanding the stack was historically needed on LinuxThreads // thread stacks, which were allocated with mmap(MAP_GROWSDOWN). Nowadays // it is kept to deal with very rare corner cases: // // For one, user may run the VM on an own implementation of threads // whose stacks are - like the old LinuxThreads - implemented using // mmap(MAP_GROWSDOWN). // // Also, this coding may be needed if the VM is running on the primordial // thread. Normally we avoid running on the primordial thread; however, // user may still invoke the VM on the primordial thread. // // The following historical comment describes the details about running // on a thread stack allocated with mmap(MAP_GROWSDOWN):
// Force Linux kernel to expand current thread stack. If "bottom" is close // to the stack guard, caller should block all signals. // // MAP_GROWSDOWN: // A special mmap() flag that is used to implement thread stacks. It tells // kernel that the memory region should extend downwards when needed. This // allows early versions of LinuxThreads to only mmap the first few pages // when creating a new thread. Linux kernel will automatically expand thread // stack as needed (on page faults). // // However, because the memory region of a MAP_GROWSDOWN stack can grow on // demand, if a page fault happens outside an already mapped MAP_GROWSDOWN // region, it's hard to tell if the fault is due to a legitimate stack // access or because of reading/writing non-exist memory (e.g. buffer // overrun). As a rule, if the fault happens below current stack pointer, // Linux kernel does not expand stack, instead a SIGSEGV is sent to the // application (see Linux kernel fault.c). // // This Linux feature can cause SIGSEGV when VM bangs thread stack for // stack overflow detection. // // Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do // not use MAP_GROWSDOWN. // // To get around the problem and allow stack banging on Linux, we need to // manually expand thread stack after receiving the SIGSEGV. // // There are two ways to expand thread stack to address "bottom", we used // both of them in JVM before 1.5: // 1. adjust stack pointer first so that it is below "bottom", and then // touch "bottom" // 2. mmap() the page in question // // Now alternate signal stack is gone, it's harder to use 2. For instance, // if current sp is already near the lower end of page 101, and we need to // call mmap() to map page 100, it is possible that part of the mmap() frame // will be placed in page 100. When page 100 is mapped, it is zero-filled. // That will destroy the mmap() frame and cause VM to crash. 
// // The following code works by adjusting sp first, then accessing the "bottom" // page to force a page fault. Linux kernel will then automatically expand the // stack mapping. // // _expand_stack_to() assumes its frame size is less than page size, which // should always be true if the function is not inlined.
// Adjust bottom to point to the largest address within the same page, it // gives us a one-page buffer if alloca() allocates slightly more memory.
bottom = (address)align_down((uintptr_t)bottom, os::vm_page_size());
bottom += os::vm_page_size() - 1;
// sp might be slightly above current stack pointer; if that's the case, we // will alloca() a little more space than necessary, which is OK. Don't use // os::current_stack_pointer(), as its result can be slightly below current // stack pointer, causing us to not alloca enough to reach "bottom".
sp = (address)&sp;
if (sp > bottom) {
size = sp - bottom;
p = (volatilechar *)alloca(size);
assert(p != NULL && p <= (volatilechar *)bottom, "alloca problem?");
p[0] = '\0';
}
}
// NOTE(review): thread_native_entry() below is the pthread start routine
// for all VM-created threads. Its signature ("static void
// *thread_native_entry(Thread *thread) {") and several statements have been
// swallowed into trailing "//" comments by merged lines (including the
// non-glibc alloca stack-offset randomization statics on the "#ifndef
// __GLIBC__" line). The function is also truncated here - the parent
// handshake body and the final return are not visible. Restore the original
// line structure before compiling.
////////////////////////////////////////////////////////////////////////////// // create new thread
// Thread start routine for all newly created threads staticvoid *thread_native_entry(Thread *thread) {
thread->record_stack_base_and_size();
#ifndef __GLIBC__ // Try to randomize the cache line index of hot stack frames. // This helps when threads of the same stack traces evict each other's // cache lines. The threads can be either from the same JVM instance, or // from different JVM instances. The benefit is especially true for // processors with hyperthreading technology. // This code is not needed anymore in glibc because it has MULTI_PAGE_ALIASING // and we did not see any degradation in performance without `alloca()`. staticint counter = 0; int pid = os::current_process_id(); int random = ((pid ^ counter++) & 7) * 128; void *stackmem = alloca(random != 0 ? random : 1); // ensure we allocate > 0 // Ensure the alloca result is used in a way that prevents the compiler from eliding it.
*(char *)stackmem = 1; #endif
if (UseNUMA) { int lgrp_id = os::numa_get_group_id(); if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
} // initialize signal mask for this thread
PosixSignals::hotspot_sigmask(thread);
// initialize floating point control register
os::Linux::init_thread_fpu_state();
// handshaking with parent thread
{
MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);
// On Linux, glibc places static TLS blocks (for __thread variables) on // the thread stack. This decreases the stack size actually available // to threads. // // For large static TLS sizes, this may cause threads to malfunction due // to insufficient stack space. This is a well-known issue in glibc: // http://sourceware.org/bugzilla/show_bug.cgi?id=11787. // // As a workaround, we call a private but assumed-stable glibc function, // __pthread_get_minstack() to obtain the minstack size and derive the // static TLS size from it. We then increase the user requested stack // size by this TLS size. // // Due to compatibility concerns, this size adjustment is opt-in and // controlled via AdjustStackSizeForTLS. typedef size_t (*GetMinStack)(const pthread_attr_t *attr);
// Returns the size of the static TLS area glibc puts on thread stacks. // The value is cached on first use, which occurs when the first thread // is created during VM initialization. static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
size_t tls_size = 0; if (_get_minstack_func != NULL) { // Obtain the pthread minstack size by calling __pthread_get_minstack.
size_t minstack_size = _get_minstack_func(attr);
// Remove non-TLS area size included in minstack size returned // by __pthread_get_minstack() to get the static TLS size. // In glibc before 2.27, minstack size includes guard_size. // In glibc 2.27 and later, guard_size is automatically added // to the stack size by pthread_create and is no longer included // in minstack size. In both cases, the guard_size is taken into // account, so there is no need to adjust the result for that. // // Although __pthread_get_minstack() is a private glibc function, // it is expected to have a stable behavior across future glibc // versions while glibc still allocates the static TLS blocks off // the stack. Following is glibc 2.28 __pthread_get_minstack(): // // size_t // __pthread_get_minstack (const pthread_attr_t *attr) // { // return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN; // } // // // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN' // if check is done for precaution. if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
}
}
log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
tls_size); return tls_size;
}
// NOTE(review): this is the interior of os::create_thread(). It computes the
// final stack size (guard-page / static-TLS adjustment), starts the new
// thread with pthread_create() (retrying up to 3 times on EAGAIN), and waits
// for the child to leave the ALLOCATED state. The enclosing signature and
// the setup of 'attr', 'osthread', 'thr_type' and 'req_stack_size' are not
// visible in this chunk; "returnfalse"/"returntrue" are lost-whitespace
// merges of "return false"/"return true". Restore before compiling.
// Calculate stack size if it's not specified by caller.
size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); // In glibc versions prior to 2.27 the guard size mechanism // is not implemented properly. The posix standard requires adding // the size of the guard pages to the stack size, instead Linux // takes the space out of 'stacksize'. Thus we adapt the requested // stack_size by the size of the guard pages to mimic proper // behaviour. However, be careful not to end up with a size // of zero due to overflow. Don't add the guard page in that case.
size_t guard_size = os::Linux::default_guard_size(thr_type); // Configure glibc guard page. Must happen before calling // get_static_tls_area_size(), which uses the guard_size.
pthread_attr_setguardsize(&attr, guard_size);
size_t stack_adjust_size = 0; if (AdjustStackSizeForTLS) { // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
stack_adjust_size += get_static_tls_area_size(&attr);
} else {
stack_adjust_size += guard_size;
}
// Only add the adjustment when it cannot overflow stack_size.
stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size()); if (stack_size <= SIZE_MAX - stack_adjust_size) {
stack_size += stack_adjust_size;
}
assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
int status = pthread_attr_setstacksize(&attr, stack_size); if (status != 0) { // pthread_attr_setstacksize() function can fail // if the stack size exceeds a system-imposed limit.
assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
(thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
stack_size / K);
thread->set_osthread(NULL); delete osthread; returnfalse;
}
ThreadState state;
{
ResourceMark rm;
// Retry pthread_create a few times: EAGAIN is transient (resource limits).
pthread_t tid; int ret = 0; int limit = 3; do {
ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
} while (ret == EAGAIN && limit-- > 0);
char buf[64]; if (ret == 0) {
log_info(os, thread)("Thread \"%s\" started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
thread->name(), (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread \"%s\" - pthread_create failed (%s) for attributes: %s.",
thread->name(), os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr)); // Log some OS information which might explain why creating the thread failed.
log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
LogStream st(Log(os, thread)::info());
os::Posix::print_rlimit_info(&st);
os::print_memory_info(&st);
os::Linux::print_proc_sys_info(&st);
os::Linux::print_container_info(&st);
}
pthread_attr_destroy(&attr);
if (ret != 0) { // Need to clean up stuff we've allocated so far
thread->set_osthread(NULL); delete osthread; returnfalse;
}
// Store pthread info into the OSThread
osthread->set_pthread_id(tid);
// Wait until child thread is either initialized or aborted
{
Monitor* sync_with_child = osthread->startThread_lock();
MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag); while ((state = osthread->get_state()) == ALLOCATED) {
sync_with_child->wait_without_safepoint_check();
}
}
}
// The thread is returned suspended (in state INITIALIZED), // and is started higher up in the call chain
assert(state == INITIALIZED, "race condition"); returntrue;
}
// bootstrap the main thread bool os::create_main_thread(JavaThread* thread) {
assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread"); return create_attached_thread(thread);
}
// NOTE(review): this is the interior of os::create_attached_thread(); the
// enclosing signature is not visible in this chunk and the primordial-thread
// stack-mapping branch at the end is truncated. "returnfalse" is a
// lost-whitespace merge of "return false". Restore before compiling.
// Allocate the OSThread object
OSThread* osthread = new OSThread();
if (osthread == NULL) { returnfalse;
}
// Store pthread info into the OSThread
osthread->set_thread_id(os::Linux::gettid());
osthread->set_pthread_id(::pthread_self());
// initialize floating point control register
os::Linux::init_thread_fpu_state();
// Initial thread state is RUNNABLE
osthread->set_state(RUNNABLE);
thread->set_osthread(osthread);
if (UseNUMA) { int lgrp_id = os::numa_get_group_id(); if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
}
if (os::is_primordial_thread()) { // If current thread is primordial thread, its stack is mapped on demand, // see notes about MAP_GROWSDOWN. Here we try to force kernel to map // the entire stack region to avoid SEGV in stack banging. // It is also useful to get around the heap-stack-gap problem on SuSE // kernel (see 4821821 for details). We first expand stack to the top // of yellow zone, then enable stack yellow zone (order is significant, // enabling yellow zone first will crash JVM on SuSE Linux), so there // is no gap between the last two virtual memory regions.
StackOverflow* overflow_state = thread->stack_overflow_state();
address addr = overflow_state->stack_reserved_zone_base();
assert(addr != NULL, "initialization problem?");
assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");
// NOTE(review): the function signature "void os::free_thread(OSThread*
// osthread) {" has been swallowed into the trailing "//" comment of the
// first merged line, and the body is truncated after the ASSERT block (the
// osthread delete is not visible). Also "¤t" below is HTML-entity damage
// of "&current" ("&curren;" + "t") - each occurrence should read "&current".
// The ASSERT block verifies the suspend/resume signal is not blocked for
// the exiting thread. Restore before compiling.
// Free Linux resources related to the OSThread void os::free_thread(OSThread* osthread) {
assert(osthread != NULL, "osthread not set");
// We are told to free resources of the argument thread, // but we can only really operate on the current thread.
assert(Thread::current()->osthread() == osthread, "os::free_thread but not current thread");
#ifdef ASSERT
sigset_t current;
sigemptyset(¤t);
pthread_sigmask(SIG_SETMASK, NULL, ¤t);
assert(!sigismember(¤t, PosixSignals::SR_signum), "SR signal should not be blocked!"); #endif
// Check if current thread is the primordial thread, similar to Solaris thr_main. bool os::is_primordial_thread(void) { if (suppress_primordial_thread_resolution) { returnfalse;
} char dummy; // If called before init complete, thread stack bottom will be null. // Can be called if fatal error occurs before initialization. if (os::Linux::initial_thread_stack_bottom() == NULL) returnfalse;
assert(os::Linux::initial_thread_stack_bottom() != NULL &&
os::Linux::initial_thread_stack_size() != 0, "os::init did not locate primordial thread's stack region"); if ((address)&dummy >= os::Linux::initial_thread_stack_bottom() &&
(address)&dummy < os::Linux::initial_thread_stack_bottom() +
os::Linux::initial_thread_stack_size()) { returntrue;
} else { returnfalse;
}
}
// Find the virtual memory area that contains addr staticbool find_vma(address addr, address* vma_low, address* vma_high) {
FILE *fp = os::fopen("/proc/self/maps", "r"); if (fp) {
address low, high; while (!feof(fp)) { if (fscanf(fp, "%p-%p", &low, &high) == 2) { if (low <= addr && addr < high) { if (vma_low) *vma_low = low; if (vma_high) *vma_high = high;
fclose(fp); returntrue;
}
} for (;;) { int ch = fgetc(fp); if (ch == EOF || ch == (int)'\n') break;
}
}
fclose(fp);
} returnfalse;
}
// NOTE(review): several merged lines below have swallowed code into trailing
// "//" comments ("struct rlimit rlim;", the "if (stack_size >= ...)" header,
// "char stat[2048]; int statlen;", "char * s = strrchr(stat, ')');", the
// fscanf of /proc/self/stat, and "if (max_size > 0) {"), and "unsignedlong"
// is a lost-whitespace merge of "unsigned long". The function is also
// truncated after the 'primordial' determination. Restore the original line
// structure before compiling.
// Locate primordial thread stack. This special handling of primordial thread stack // is needed because pthread_getattr_np() on most (all?) Linux distros returns // bogus value for the primordial process thread. While the launcher has created // the VM in a new thread since JDK 6, we still have to allow for the use of the // JNI invocation API from a primordial thread. void os::Linux::capture_initial_stack(size_t max_size) {
// max_size is either 0 (which means accept OS default for thread stacks) or // a user-specified value known to be at least the minimum needed. If we // are actually on the primordial thread we can make it appear that we have a // smaller max_size stack by inserting the guard pages at that location. But we // cannot do anything to emulate a larger stack than what has been provided by // the OS or threading library. In fact if we try to use a stack greater than // what is set by rlimit then we will crash the hosting process.
// Maximum stack size is the easy part, get it from RLIMIT_STACK. // If this is "unlimited" then it will be a huge value. struct rlimit rlim;
getrlimit(RLIMIT_STACK, &rlim);
size_t stack_size = rlim.rlim_cur;
// 6308388: a bug in ld.so will relocate its own .data section to the // lower end of primordial stack; reduce ulimit -s value a little bit // so we won't install guard page on ld.so's data section. // But ensure we don't underflow the stack size - allow 1 page spare if (stack_size >= (size_t)(3 * os::vm_page_size())) {
stack_size -= 2 * os::vm_page_size();
}
// Try to figure out where the stack base (top) is. This is harder. // // When an application is started, glibc saves the initial stack pointer in // a global variable "__libc_stack_end", which is then used by system // libraries. __libc_stack_end should be pretty close to stack top. The // variable is available since the very early days. However, because it is // a private interface, it could disappear in the future. // // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar // to __libc_stack_end, it is very close to stack top, but isn't the real // stack top. Note that /proc may not exist if VM is running as a chroot // program, so reading /proc/<pid>/stat could fail. Also the contents of // /proc/<pid>/stat could change in the future (though unlikely). // // We try __libc_stack_end first. If that doesn't work, look for // /proc/<pid>/stat. If neither of them works, we use current stack pointer // as a hint, which should work well in most cases.
uintptr_t stack_start;
// try __libc_stack_end first
uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end"); if (p && *p) {
stack_start = *p;
} else { // see if we can get the start_stack field from /proc/self/stat
// Locals matching the /proc/self/stat fields up to start_stack; see proc(5).
FILE *fp; int pid; char state; int ppid; int pgrp; int session; int nr; int tpgrp; unsignedlong flags; unsignedlong minflt; unsignedlong cminflt; unsignedlong majflt; unsignedlong cmajflt; unsignedlong utime; unsignedlong stime; long cutime; long cstime; long prio; long nice; long junk; long it_real;
uintptr_t start;
uintptr_t vsize;
intptr_t rss;
uintptr_t rsslim;
uintptr_t scodes;
uintptr_t ecode; int i;
// Figure what the primordial thread stack base is. Code is inspired // by email from Hans Boehm. /proc/self/stat begins with current pid, // followed by command name surrounded by parentheses, state, etc. char stat[2048]; int statlen;
// Skip pid and the command string. Note that we could be dealing with // weird command names, e.g. user could decide to rename java launcher // to "java 1.4.2 :)", then the stat file would look like // 1234 (java 1.4.2 :)) R ... ... // We don't really need to know the command string, just find the last // occurrence of ")" and then start parsing from there. See bug 4726580. char * s = strrchr(stat, ')');
i = 0; if (s) { // Skip blank chars do { s++; } while (s && isspace(*s));
// 'i' is the sscanf conversion count; 28 - 2 fields expected (pid and
// command were skipped above).
if (i != 28 - 2) {
assert(false, "Bad conversion from /proc/self/stat"); // product mode - assume we are the primordial thread, good luck in the // embedded case.
warning("Can't detect primordial thread stack location - bad conversion");
stack_start = (uintptr_t) &rlim;
}
} else { // For some reason we can't open /proc/self/stat (for example, running on // FreeBSD with a Linux emulator, or inside chroot), this should work for // most cases, so don't abort:
warning("Can't detect primordial thread stack location - no /proc/self/stat");
stack_start = (uintptr_t) &rlim;
}
}
// Now we have a pointer (stack_start) very close to the stack top, the // next thing to do is to figure out the exact location of stack top. We // can find out the virtual memory area that contains stack_start by // reading /proc/self/maps, it should be the last vma in /proc/self/maps, // and its upper limit is the real stack top. (again, this would fail if // running inside chroot, because /proc may not exist.)
uintptr_t stack_top;
address low, high; if (find_vma((address)stack_start, &low, &high)) { // success, "high" is the true stack top. (ignore "low", because initial // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
stack_top = (uintptr_t)high;
} else { // failed, likely because /proc/self/maps does not exist
warning("Can't detect primordial thread stack location - find_vma failed"); // best effort: stack_start is normally within a few pages below the real // stack top, use it as stack top, and reduce stack size so we won't put // guard page outside stack.
stack_top = stack_start;
stack_size -= 16 * os::vm_page_size();
}
// stack_top could be partially down the page so align it
stack_top = align_up(stack_top, os::vm_page_size());
// Allowed stack value is minimum of max_size and what we derived from rlimit if (max_size > 0) {
_initial_thread_stack_size = MIN2(max_size, stack_size);
} else { // Accept the rlimit max, but if stack is unlimited then it will be huge, so // clamp it at 8MB as we do on Solaris
_initial_thread_stack_size = MIN2(stack_size, 8*M);
}
_initial_thread_stack_size = align_down(_initial_thread_stack_size, os::vm_page_size());
_initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
if (log_is_enabled(Info, os, thread)) { // See if we seem to be on primordial process thread bool primordial = uintptr_t(&rlim) > uintptr_t(_initial_thread_stack_bottom) &&
uintptr_t(&rlim) < stack_top;
// NOTE(review): the fast-clock / clock_getres() discussion that precedes
// this code describes a time-initialization function whose body is not
// visible in this chunk; it does not apply to the two id accessors below.

// thread_id is the kernel thread id (similar to a Solaris LWP id).
intx os::current_thread_id() {
  return os::Linux::gettid();
}

// Process id of the current process.
int os::current_process_id() {
  return ::getpid();
}
// DLL functions
// This must be hard coded because it's the system's temporary // directory not the java application's temp directory, ala java.io.tmpdir. constchar* os::get_temp_directory() { return"/tmp"; }
// check if addr is inside libjvm.so bool os::address_is_in_vm(address addr) { static address libjvm_base_addr;
Dl_info dlinfo;
if (libjvm_base_addr == NULL) { if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
libjvm_base_addr = (address)dlinfo.dli_fbase;
}
assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
}
if (dladdr((void *)addr, &dlinfo) != 0) { if (libjvm_base_addr == (address)dlinfo.dli_fbase) returntrue;
}
returnfalse;
}
// Maps addr to a symbol name (demangled if requested) via dladdr(), falling
// back to the Decoder on the containing library when no symbol matches.
// NOTE(review): this definition is truncated - the fall-through tail
// (clearing buf, setting *offset = -1, and the final "return false") and the
// function's closing brace are not visible. Several inner "if" headers have
// been swallowed into trailing "//" comments of merged lines, and
// "returntrue" is a lost-whitespace merge of "return true". Restore before
// compiling.
bool os::dll_address_to_function_name(address addr, char *buf, int buflen, int *offset, bool demangle) { // buf is not optional, but offset is optional
assert(buf != NULL, "sanity check");
Dl_info dlinfo;
if (dladdr((void*)addr, &dlinfo) != 0) { // see if we have a matching symbol if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
} if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; returntrue;
} // no matching symbol so try for just file info if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
buf, buflen, offset, dlinfo.dli_fname, demangle)) { returntrue;
}
}
}
// Map a code address to the path of the shared library containing it.
// NOTE(review): this definition is truncated in this excerpt -- only the
// signature and the opening assertion are visible; the dladdr()-based body
// is missing. Compare against upstream os_linux.cpp before relying on it.
bool os::dll_address_to_library_name(address addr, char* buf, int buflen, int* offset) { // buf is not optional, but offset is optional
assert(buf != nullptr, "sanity check");
// Remember the stack's state. The Linux dynamic linker will change // the stack to 'executable' at most once, so we must safepoint only once. bool os::Linux::_stack_is_executable = false;
// VM operation that loads a library. This is necessary if stack protection // of the Java stacks can be lost during loading the library. If we // do not stop the Java threads, they can stack overflow before the stacks // are protected again. class VM_LinuxDllLoad: public VM_Operation { private: constchar *_filename; char *_ebuf; int _ebuflen; void *_lib; public:
VM_LinuxDllLoad(constchar *fn, char *ebuf, int ebuflen) :
_filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
VMOp_Type type() const { return VMOp_LinuxDllLoad; } void doit() {
_lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
os::Linux::_stack_is_executable = true;
} void* loaded_library() { return _lib; }
};
// Load a shared library for the VM. Guards against libraries that would
// mark the Java thread stacks executable (losing the guard pages), and on
// dlopen failure appends an architecture-mismatch diagnosis to 'ebuf'.
// NOTE(review): this excerpt is heavily garbled by extraction -- several
// sections of the function body are missing (the safepointed VM_LinuxDllLoad
// execution, the fallback dlopen, the ELF header read that defines
// 'elf_head', and the 'arch_array'/'running_arch_code' table). Restore from
// upstream os_linux.cpp before attempting to compile.
void * os::dll_load(constchar *filename, char *ebuf, int ebuflen) { void * result = NULL; bool load_attempted = false;
log_info(os)("attempting shared library load of %s", filename);
// Check whether the library to load might change execution rights // of the stack. If they are changed, the protection of the stack // guard pages will be lost. We need a safepoint to fix this. // // See Linux man page execstack(8) for more info. if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) { if (!ElfFile::specifies_noexecstack(filename)) { if (!is_init_completed()) {
os::Linux::_stack_is_executable = true; // This is OK - No Java threads have been created yet, and hence no // stack guard pages to fix. // // Dynamic loader will make all stacks executable after // this function returns, and will not do that again.
assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
} else {
// NOTE(review): upstream warning text reads "'execstack -c <libfile>'";
// the "<libfile>" placeholder appears to have been stripped here.
warning("You have loaded library %s which might have disabled stack guard. " "The VM will try to fix the stack guard now.\n" "It's highly recommended that you fix the library with " "'execstack -c ', or link it with '-z noexecstack'.",
filename);
JavaThread *jt = JavaThread::current(); if (jt->thread_state() != _thread_in_native) { // This happens when a compiler thread tries to load a hsdis-<arch>.so file // that requires ExecStack. Cannot enter safe point. Let's give up.
warning("Unable to fix stack guard. Giving up.");
} else { if (!LoadExecStackDllInVMThread) { // This is for the case where the DLL has an static // constructor function that executes JNI code. We cannot // load such DLLs in the VMThread.
result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
}
// NOTE(review): a large section is missing here -- upstream executes a
// VM_LinuxDllLoad safepoint op, falls back to dlopen_helper when no load
// was attempted, and reads the library's ELF header into 'elf_head'.
// Byte-swap the ELF machine field if the library's endianness differs
// from the VM's, so the arch table comparison below works.
if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) { // handle invalid/out of range endianness values if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) { return NULL;
}
#ifdefined(VM_LITTLE_ENDIAN) // VM is LE, shared object BE
elf_head.e_machine = be16toh(elf_head.e_machine); #else // VM is BE, shared object LE
elf_head.e_machine = le16toh(elf_head.e_machine); #endif
}
// Descriptor for one entry of the (missing) 'arch_array' table.
typedefstruct {
Elf32_Half code; // Actual value as defined in elf.h
Elf32_Half compat_class; // Compatibility of archs at VM's sense unsignedchar elf_class; // 32 or 64 bit unsignedchar endianness; // MSB or LSB char* name; // String representation
} arch_t;
// Identify compatibility class for VM's architecture and library's architecture // Obtain string descriptions for architectures
arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; int running_arch_index=-1;
for (unsignedint i=0; i < ARRAY_SIZE(arch_array); i++) { if (running_arch_code == arch_array[i].code) {
running_arch_index = i;
} if (lib_arch.code == arch_array[i].code) {
lib_arch.compat_class = arch_array[i].compat_class;
lib_arch.name = arch_array[i].name;
}
}
// If we could not identify the VM's own architecture we cannot produce a
// meaningful diagnosis; just return NULL so the caller reports dlerror().
assert(running_arch_index != -1, "Didn't find running architecture code (running_arch_code) in arch_array"); if (running_arch_index == -1) { // Even though running architecture detection failed // we may still continue with reporting dlerror() message return NULL;
}
// Architecture family mismatch: append a human-readable hint to ebuf.
if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { if (lib_arch.name != NULL) {
::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: can't load %s .so on a %s platform)",
lib_arch.name, arch_array[running_arch_index].name);
} else {
::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
lib_arch.code, arch_array[running_arch_index].name);
} return NULL;
}
// 32-bit vs 64-bit mismatch: append a word-width hint to ebuf.
if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
(int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32); return NULL;
}
return NULL;
}
void * os::Linux::dlopen_helper(constchar *filename, char *ebuf, int ebuflen) { void * result = ::dlopen(filename, RTLD_LAZY); if (result == NULL) { constchar* error_report = ::dlerror(); if (error_report == NULL) {
error_report = "dlerror returned no error description";
} if (ebuf != NULL && ebuflen > 0) {
::strncpy(ebuf, error_report, ebuflen-1);
ebuf[ebuflen-1]='\0';
}
Events::log_dll_message(NULL, "Loading shared library %s failed, %s", filename, error_report);
log_info(os)("shared library load of %s failed, %s", filename, error_report);
} else {
Events::log_dll_message(NULL, "Loaded shared library %s", filename);
log_info(os)("shared library load of %s was successful", filename);
} return result;
}
void * os::Linux::dll_load_in_vmthread(constchar *filename, char *ebuf, int ebuflen) { void * result = NULL; if (LoadExecStackDllInVMThread) {
result = dlopen_helper(filename, ebuf, ebuflen);
}
// Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a // library that requires an executable stack, or which does not have this // stack attribute set, dlopen changes the stack attribute to executable. The // read protection of the guard pages gets lost. // // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad // may have been queued at the same time.
if (!_stack_is_executable) { for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
StackOverflow* overflow_state = jt->stack_overflow_state(); if (!overflow_state->stack_guard_zone_unused() && // Stack not yet fully initialized
overflow_state->stack_guards_enabled()) { // No pending stack overflow exceptions if (!os::guard_memory((char *)jt->stack_end(), StackOverflow::stack_guard_zone_size())) {
warning("Attempt to reguard stack yellow zone failed.");
}
}
}
}
return result;
}
constchar* os::Linux::dll_path(void* lib) { struct link_map *lmap; constchar* l_path = NULL;
assert(lib != NULL, "dll_path parameter must not be NULL");
int res_dli = ::dlinfo(lib, RTLD_DI_LINKMAP, &lmap); if (res_dli == 0) {
l_path = lmap->l_name;
} return l_path;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.