/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2008-2021 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice.
*/
/* PROC_READ is the primitive used to read /proc files below; it        */
/* defaults to the plain read() call.                                   */
#if ((defined(LINUX_STACKBOTTOM) || defined(NEED_PROC_MAPS) \
      || defined(PROC_VDB) || defined(SOFT_VDB)) && !defined(PROC_READ)) \
    || defined(CPPCHECK)
# define PROC_READ read
          /* Should probably call the real read, if read is wrapped.    */
#endif
#ifdefined(LINUX_STACKBOTTOM) || defined(NEED_PROC_MAPS) /* Repeatedly perform a read call until the buffer is filled */ /* up, or we encounter EOF or an error. */ STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
size_t num_read = 0;
ASSERT_CANCEL_DISABLED(); while (num_read < count) {
ssize_t result = PROC_READ(fd, buf + num_read, count - num_read);
#ifdef NEED_PROC_MAPS /* We need to parse /proc/self/maps, either to find dynamic libraries, */ /* and/or to find the register backing store base (IA64). Do it once */ /* here. */
#ifdef THREADS
  /* Determine the length of a file by incrementally reading it into a  */
  /* buffer.  This would be silly to use it on a file supporting lseek, */
  /* but Linux /proc files usually do not.                              */
  /* As of Linux 4.15.0, lseek(SEEK_END) fails for /proc/self/maps.     */
  STATIC size_t GC_get_file_len(int f)
  {
    size_t total = 0;
    ssize_t result;
#   define GET_FILE_LEN_BUF_SZ 500
    char buf[GET_FILE_LEN_BUF_SZ];

    /* Keep reading until EOF; a read error is reported as zero length. */
    do {
      result = PROC_READ(f, buf, sizeof(buf));
      if (result == -1) return 0;
      total += result;
    } while (result > 0);
    return total;
  }
STATIC size_t GC_get_maps_len(void)
{ int f = open("/proc/self/maps", O_RDONLY);
size_t result; if (f < 0) return 0; /* treat missing file as empty */
result = GC_get_file_len(f);
close(f); return result;
} #endif/* THREADS */
/* Copy the contents of /proc/self/maps to a buffer in our address */ /* space. Return the address of the buffer. */
GC_INNER constchar * GC_get_maps(void)
{
ssize_t result; staticchar *maps_buf = NULL; static size_t maps_buf_sz = 1;
size_t maps_size; # ifdef THREADS
size_t old_maps_size = 0; # endif
/* The buffer is essentially static, so there must be a single client. */
GC_ASSERT(I_HOLD_LOCK());
/* Note that in the presence of threads, the maps file can */ /* essentially shrink asynchronously and unexpectedly as */ /* threads that we already think of as dead release their */ /* stacks. And there is no easy way to read the entire */ /* file atomically. This is arguably a misfeature of the */ /* /proc/self/maps interface. */ /* Since we expect the file can grow asynchronously in rare */ /* cases, it should suffice to first determine */ /* the size (using read), and then to reread the file. */ /* If the size is inconsistent we have to retry. */ /* This only matters with threads enabled, and if we use */ /* this to locate roots (not the default). */
# ifdef THREADS /* Determine the initial size of /proc/self/maps. */
maps_size = GC_get_maps_len(); if (0 == maps_size)
ABORT("Cannot determine length of /proc/self/maps"); # else
maps_size = 4000; /* Guess */ # endif
/* Read /proc/self/maps, growing maps_buf as necessary. */ /* Note that we may not allocate conventionally, and */ /* thus can't use stdio. */ do { int f;
while (maps_size >= maps_buf_sz) { # ifdef LINT2 /* Workaround passing tainted maps_buf to a tainted sink. */
GC_noop1((word)maps_buf); # else
GC_scratch_recycle_no_gww(maps_buf, maps_buf_sz); # endif /* Grow only by powers of 2, since we leak "too small" buffers.*/ while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
maps_buf = GC_scratch_alloc(maps_buf_sz); if (NULL == maps_buf)
ABORT_ARG1("Insufficient space for /proc/self/maps buffer", ", %lu bytes requested", (unsignedlong)maps_buf_sz); # ifdef THREADS /* Recompute initial length, since we allocated. */ /* This can only happen a few times per program */ /* execution. */
maps_size = GC_get_maps_len(); if (0 == maps_size)
ABORT("Cannot determine length of /proc/self/maps"); # endif
}
GC_ASSERT(maps_buf_sz >= maps_size + 1);
f = open("/proc/self/maps", O_RDONLY); if (-1 == f)
ABORT_ARG1("Cannot open /proc/self/maps", ": errno= %d", errno); # ifdef THREADS
old_maps_size = maps_size; # endif
maps_size = 0; do {
result = GC_repeat_read(f, maps_buf, maps_buf_sz-1); if (result < 0) {
ABORT_ARG1("Failed to read /proc/self/maps", ": errno= %d", errno);
}
maps_size += result;
} while ((size_t)result == maps_buf_sz-1);
close(f); if (0 == maps_size)
ABORT("Empty /proc/self/maps"); # ifdef THREADS if (maps_size > old_maps_size) { /* This might be caused by e.g. thread creation. */
WARN("Unexpected asynchronous /proc/self/maps growth" " (to %" WARN_PRIdPTR " bytes)\n", maps_size);
} # endif
} while (maps_size >= maps_buf_sz # ifdef THREADS
|| maps_size < old_maps_size # endif
);
maps_buf[maps_size] = '\0'; return maps_buf;
}
/* * GC_parse_map_entry parses an entry from /proc/self/maps so we can * locate all writable data segments that belong to shared libraries. * The format of one of these entries and the fields we care about * is as follows: * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^ * start end prot maj_dev * * Note that since about august 2003 kernels, the columns no longer have * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets * anywhere, which is safer anyway.
*/
/* Assign various fields of the first line in maps_ptr to (*start), */ /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */ /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */ /* original buffer. */ #if (defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)) \
|| defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
|| (defined(REDIRECT_MALLOC) && defined(GC_LINUX_THREADS))
GC_INNER constchar *GC_parse_map_entry(constchar *maps_ptr,
ptr_t *start, ptr_t *end, constchar **prot, unsigned *maj_dev, constchar **mapping_name)
{ constunsignedchar *start_start, *end_start, *maj_dev_start; constunsignedchar *p; /* unsigned for isspace, isxdigit */
while (isspace(*p)) ++p;
GC_ASSERT(*p == 'r' || *p == '-');
*prot = (constchar *)p; /* Skip past protection field to offset field */ while (!isspace(*p)) ++p; while (isspace(*p)) p++;
GC_ASSERT(isxdigit(*p)); /* Skip past offset field, which we ignore */ while (!isspace(*p)) ++p; while (isspace(*p)) p++;
maj_dev_start = p;
GC_ASSERT(isxdigit(*maj_dev_start));
*maj_dev = strtoul((constchar *)maj_dev_start, NULL, 16);
#ifdefined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) /* Try to read the backing store base from /proc/self/maps. */ /* Return the bounds of the writable mapping with a 0 major device, */ /* which includes the address passed as data. */ /* Return FALSE if there is no such mapping. */
GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
ptr_t *endp)
{ constchar *prot;
ptr_t my_start, my_end; unsignedint maj_dev; constchar *maps_ptr = GC_get_maps();
for (;;) {
maps_ptr = GC_parse_map_entry(maps_ptr, &my_start, &my_end,
&prot, &maj_dev, 0); if (NULL == maps_ptr) break;
/* Set p to point just past last slash, if any. */ while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p; while (*p != '/' && (word)p >= (word)map_path) --p;
++p; if (strncmp(nm, p, nm_len) == 0) {
*startp = my_start;
*endp = my_end; returnTRUE;
}
}
} returnFALSE;
} #endif/* REDIRECT_MALLOC */
#ifdef IA64
  /* Obtain the register backing store base by asking                   */
  /* GC_enclosing_mapping() for the mapping that contains the current   */
  /* backing store pointer; returns 0 on failure.                       */
  static ptr_t backing_store_base_from_proc(void)
  {
    ptr_t my_start, my_end;

    if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
      GC_COND_LOG_PRINTF("Failed to find backing store base from /proc\n");
      return 0;
    }
    return my_start;
  }
#endif
#endif/* NEED_PROC_MAPS */
#ifdefined(SEARCH_FOR_DATA_START) /* The I386 case can be handled without a search. The Alpha case */ /* used to be handled differently as well, but the rules changed */ /* for recent Linux versions. This seems to be the easiest way to */ /* cover all versions. */
# ifdefined(LINUX) || defined(HURD) /* Some Linux distributions arrange to define __data_start. Some */ /* define data_start as a weak symbol. The latter is technically */ /* broken, since the user program may define data_start, in which */ /* case we lose. Nonetheless, we try both, preferring __data_start.*/ /* We assume gcc-compatible pragmas. */
EXTERN_C_BEGIN # pragma weak __data_start # pragma weak data_start externint __data_start[], data_start[];
EXTERN_C_END # endif /* LINUX */
# if (defined(LINUX) || defined(HURD)) && defined(USE_PROG_DATA_START) /* Try the easy approaches first: */ /* However, this may lead to wrong data start value if libgc */ /* code is put into a shared library (directly or indirectly) */ /* which is linked with -Bsymbolic-functions option. Thus, */ /* the following is not used by default. */ if (COVERT_DATAFLOW(__data_start) != 0) {
GC_data_start = (ptr_t)(__data_start);
} else {
GC_data_start = (ptr_t)(data_start);
} if (COVERT_DATAFLOW(GC_data_start) != 0) { if ((word)GC_data_start > (word)data_end)
ABORT_ARG2("Wrong __data_start/_end pair", ": %p .. %p", (void *)GC_data_start, (void *)data_end); return;
} # ifdef DEBUG_ADD_DEL_ROOTS
GC_log_printf("__data_start not provided\n"); # endif # endif /* LINUX */
if (GC_no_dls) { /* Not needed, avoids the SIGSEGV caused by */ /* GC_find_limit which complicates debugging. */
GC_data_start = data_end; /* set data root size to 0 */ return;
}
/* TODO: This is a simple way of allocating memory which is */ /* compatible with ECOS early releases. Later releases use a more */ /* sophisticated means of allocating memory than this simple static */ /* allocator, but this method is at least bound to work. */ staticchar ecos_gc_memory[ECOS_GC_MEMORY_SIZE]; staticchar *ecos_gc_brk = ecos_gc_memory;
GC_INNER void GC_init_netbsd_elf(void)
{ /* This may need to be environ, without the underscore, for */ /* some versions. */
GC_data_start = (ptr_t)GC_find_limit(&environ, FALSE);
} #endif/* NETBSD */
#if defined(ADDRESS_SANITIZER) && (defined(UNIX_LIKE) \
        || defined(NEED_FIND_LIMIT) || defined(MPROTECT_VDB)) \
    && !defined(CUSTOM_ASAN_DEF_OPTIONS)
  /* To tell ASan to allow GC to use its own SIGBUS/SEGV handlers.      */
  /* The function is exported just to be visible to ASan library.       */
  GC_API const char *__asan_default_options(void)
  {
    return "allow_user_segv_handler=1";
  }
#endif
/* Don't use GC_find_limit() because siglongjmp() outside of the */ /* signal handler by-passes our userland pthreads lib, leaving */ /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */ /* works-around the issues. */
/* Return the first non-addressable location > p or bound. */ /* Requires the allocation lock. */ STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
{ staticvolatile ptr_t result; /* Safer if static, since otherwise it may not be */ /* preserved across the longjmp. Can safely be */ /* static since it's only called with the */ /* allocation lock held. */
struct sigaction act;
word pgsz = (word)sysconf(_SC_PAGESIZE);
act.sa_handler = GC_fault_handler_openbsd;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_NODEFER | SA_RESTART; /* act.sa_restorer is deprecated and should not be initialized. */
sigaction(SIGSEGV, &act, &old_segv_act);
if (SETJMP(GC_jmp_buf_openbsd) == 0) {
result = (ptr_t)((word)p & ~(pgsz-1)); for (;;) { if ((word)result >= (word)bound - pgsz) {
result = bound; break;
}
result += pgsz; /* no overflow expected */
GC_noop1((word)(*result));
}
}
# ifdef THREADS /* Due to the siglongjump we need to manually unmask SIGPROF. */
__syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF)); # endif
act.sa_handler = GC_fault_handler_openbsd;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_NODEFER | SA_RESTART; /* act.sa_restorer is deprecated and should not be initialized. */
sigaction(SIGSEGV, &act, &old_segv_act);
firstpass = 1;
result = (ptr_t)((word)p & ~(pgsz-1)); if (SETJMP(GC_jmp_buf_openbsd) != 0 || firstpass) {
firstpass = 0; if ((word)result >= (word)bound - pgsz) {
result = bound;
} else {
result += pgsz; /* no overflow expected */
GC_noop1((word)(*result));
}
}
/* A kludge to get around what appears to be a header file bug */ # ifndef WORD # define WORD unsignedshort # endif # ifndef DWORD # define DWORD unsignedlong # endif
# define EXE386 1 # include <newexe.h> # include <exe386.h>
GC_INNER void GC_setpagesize(void)
{
GetSystemInfo(&GC_sysinfo); # ifdefined(CYGWIN32) && (defined(MPROTECT_VDB) || defined(USE_MUNMAP)) /* Allocations made with mmap() are aligned to the allocation */ /* granularity, which (at least on 64-bit Windows OS) is not the */ /* same as the page size. Probably a separate variable could */ /* be added to distinguish the allocation granularity from the */ /* actual page size, but in practice there is no good reason to */ /* make allocations smaller than dwAllocationGranularity, so we */ /* just use it instead of the actual page size here (as Cygwin */ /* itself does in many cases). */
GC_page_size = (size_t)GC_sysinfo.dwAllocationGranularity;
GC_ASSERT(GC_page_size >= (size_t)GC_sysinfo.dwPageSize); # else
GC_page_size = (size_t)GC_sysinfo.dwPageSize; # endif # ifdefined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
{
OSVERSIONINFO verInfo; /* Check the current WinCE version. */
verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); if (!GetVersionEx(&verInfo))
ABORT("GetVersionEx failed"); if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
verInfo.dwMajorVersion < 6) { /* Only the first 32 MB of address space belongs to the */ /* current process (unless WinCE 6.0+ or emulation). */
GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20); # ifdef THREADS /* On some old WinCE versions, it's observed that */ /* VirtualQuery calls don't work properly when used to */ /* get thread current stack committed minimum. */ if (verInfo.dwMajorVersion < 5)
GC_dont_query_stack_min = TRUE; # endif
}
} # endif
}
# ifndef CYGWIN32 # define is_writable(prot) ((prot) == PAGE_READWRITE \
|| (prot) == PAGE_WRITECOPY \
|| (prot) == PAGE_EXECUTE_READWRITE \
|| (prot) == PAGE_EXECUTE_WRITECOPY) /* Return the number of bytes that are writable starting at p. */ /* The pointer p is assumed to be page aligned. */ /* If base is not 0, *base becomes the beginning of the */ /* allocation region containing p. */ STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
{
MEMORY_BASIC_INFORMATION buf;
word result;
word protect;
result = VirtualQuery(p, &buf, sizeof(buf)); if (result != sizeof(buf)) ABORT("Weird VirtualQuery result"); if (base != 0) *base = (ptr_t)(buf.AllocationBase);
protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE)); if (!is_writable(protect)) { return(0);
} if (buf.State != MEM_COMMIT) return(0); return(buf.RegionSize);
}
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{
ptr_t trunc_sp;
word size;
/* Set page size if it is not ready (so client can use this */ /* function even before GC is initialized). */ if (!GC_page_size) GC_setpagesize();
trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1)); /* FIXME: This won't work if called from a deeply recursive */ /* client code (and the committed stack space has grown). */
size = GC_get_writable_length(trunc_sp, 0);
GC_ASSERT(size != 0);
sb -> mem_base = trunc_sp + size; return GC_SUCCESS;
} # else/* CYGWIN32 */ /* An alternate version for Cygwin (adapted from Dave Korn's */ /* gcc version of boehm-gc). */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{ # ifdef X86_64
sb -> mem_base = ((NT_TIB*)NtCurrentTeb())->StackBase; # else void * _tlsbase;
STATICvoid GC_fault_handler(int sig GC_ATTR_UNUSED)
{
LONGJMP(GC_jmp_buf, 1);
}
/* Install GC_fault_handler as the temporary fault handler (via         */
/* GC_set_and_save_fault_handler).                                      */
GC_INNER void GC_setup_temporary_fault_handler(void)
{
    /* The handler is process-wide, so only one thread at a time may    */
    /* install it; holding the allocation lock guarantees that.         */
    GC_ASSERT(I_HOLD_LOCK());
    GC_set_and_save_fault_handler(GC_fault_handler);
}
/* Return the first non-addressable location > p (up) or        */
/* the smallest location q s.t. [q,p) is addressable (!up).     */
/* We assume that p (up) or p-1 (!up) is addressable.           */
/* Requires allocation lock.                                    */
GC_ATTR_NO_SANITIZE_ADDR
STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
{
    static volatile ptr_t result;
                /* Safer if static, since otherwise it may not be       */
                /* preserved across the longjmp.  Can safely be         */
                /* static since it's only called with the               */
                /* allocation lock held.                                */

    GC_ASSERT(up ? (word)bound >= MIN_PAGE_SIZE
                 : (word)bound <= ~(word)MIN_PAGE_SIZE);
    GC_ASSERT(I_HOLD_LOCK());
    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
      result = (ptr_t)((word)p & ~(MIN_PAGE_SIZE-1));
      for (;;) {
        if (up) {
          if ((word)result >= (word)bound - MIN_PAGE_SIZE) {
            result = bound;
            break;
          }
          result += MIN_PAGE_SIZE; /* no overflow expected */
        } else {
          if ((word)result <= (word)bound + MIN_PAGE_SIZE) {
            result = bound - MIN_PAGE_SIZE;
                        /* This is to compensate        */
                        /* further result increment (we */
                        /* do not modify "up" variable  */
                        /* since it might be clobbered  */
                        /* by setjmp otherwise).        */
            break;
          }
          result -= MIN_PAGE_SIZE; /* no underflow expected */
        }
        /* Touch the page; a fault lands in the temporary handler and   */
        /* longjmps out of the SETJMP block above.                      */
        GC_noop1((word)(*result));
      }
    }
    GC_reset_fault_handler();
    if (!up) {
      result += MIN_PAGE_SIZE;
    }
    return result;
}
int i = 0; while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) { if (vm_status.pst_type == PS_RSESTACK) { return (ptr_t) vm_status.pst_vaddr;
}
}
/* old way to get the register stackbottom */ return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
& ~(BACKING_STORE_ALIGNMENT - 1));
}
#endif/* HPUX_STACK_BOTTOM */
#ifdef LINUX_STACKBOTTOM
# include <sys/types.h> # include <sys/stat.h>
# define STAT_SKIP 27 /* Number of fields preceding startstack */ /* field in /proc/self/stat */
# ifdef USE_LIBC_PRIVATES if (0 != &__libc_ia64_register_backing_store_base
&& 0 != __libc_ia64_register_backing_store_base) { /* Glibc 2.2.4 has a bug such that for dynamically linked */ /* executables __libc_ia64_register_backing_store_base is */ /* defined but uninitialized during constructor calls. */ /* Hence we check for both nonzero address and value. */ return __libc_ia64_register_backing_store_base;
} # endif
result = backing_store_base_from_proc(); if (0 == result) {
result = (ptr_t)GC_find_limit(GC_save_regs_in_stack(), FALSE); /* This works better than a constant displacement heuristic. */
} return result;
} # endif /* IA64 */
STATIC ptr_t GC_linux_main_stack_base(void)
{ /* We read the stack bottom value from /proc/self/stat. We do this */ /* using direct I/O system calls in order to avoid calling malloc */ /* in case REDIRECT_MALLOC is defined. */ # define STAT_BUF_SIZE 4096 char stat_buf[STAT_BUF_SIZE]; int f;
word result;
ssize_t i, buf_offset = 0, len;
/* First try the easy way. This should work for glibc 2.2 */ /* This fails in a prelinked ("prelink" command) executable */ /* since the correct value of __libc_stack_end never */ /* becomes visible to us. The second test works around */ /* this. */ # ifdef USE_LIBC_PRIVATES if (0 != &__libc_stack_end && 0 != __libc_stack_end ) { # ifdefined(IA64) /* Some versions of glibc set the address 16 bytes too */ /* low while the initialization code is running. */ if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) { return __libc_stack_end + 0x10;
} /* Otherwise it's not safe to add 16 bytes and we fall */ /* back to using /proc. */ # elif defined(SPARC) /* Older versions of glibc for 64-bit SPARC do not set this */ /* variable correctly, it gets set to either zero or one. */ if (__libc_stack_end != (ptr_t) (unsignedlong)0x1) return __libc_stack_end; # else return __libc_stack_end; # endif
} # endif
f = open("/proc/self/stat", O_RDONLY); if (-1 == f)
ABORT_ARG1("Could not open /proc/self/stat", ": errno= %d", errno);
len = GC_repeat_read(f, stat_buf, sizeof(stat_buf)); if (len < 0)
ABORT_ARG1("Failed to read /proc/self/stat", ": errno= %d", errno);
close(f);
/* Skip the required number of fields. This number is hopefully */ /* constant across all Linux implementations. */ for (i = 0; i < STAT_SKIP; ++i) { while (buf_offset < len && isspace(stat_buf[buf_offset++])) { /* empty */
} while (buf_offset < len && !isspace(stat_buf[buf_offset++])) { /* empty */
}
} /* Skip spaces. */ while (buf_offset < len && isspace(stat_buf[buf_offset])) {
buf_offset++;
} /* Find the end of the number and cut the buffer there. */ for (i = 0; buf_offset + i < len; i++) { if (!isdigit(stat_buf[buf_offset + i])) break;
} if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
stat_buf[buf_offset + i] = '\0';
# if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
&& (defined(THREADS) || defined(USE_GET_STACKBASE_FOR_MAIN)) # include <pthread.h> # ifdef HAVE_PTHREAD_NP_H # include <pthread_np.h> /* for pthread_attr_get_np() */ # endif # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP) /* We could use pthread_get_stackaddr_np even in case of a */ /* single-threaded gclib (there is no -lpthread on Darwin). */ # include <pthread.h> # undef STACKBOTTOM # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self()) # endif
# include <thread.h> # include <signal.h> # include <pthread.h>
/* These variables are used to cache ss_sp value for the primordial */ /* thread (it's better not to call thr_stksegment() twice for this */ /* thread - see JDK bug #4352906). */ static pthread_t stackbase_main_self = 0; /* 0 means stackbase_main_ss_sp value is unset. */ staticvoid *stackbase_main_ss_sp = NULL;
if (self == stackbase_main_self)
{ /* If the client calls GC_get_stack_base() from the main thread */ /* then just return the cached value. */
b -> mem_base = stackbase_main_ss_sp;
GC_ASSERT(b -> mem_base != NULL); return GC_SUCCESS;
}
if (thr_stksegment(&s)) { /* According to the manual, the only failure error code returned */ /* is EAGAIN meaning "the information is not available due to the */ /* thread is not yet completely initialized or it is an internal */ /* thread" - this shouldn't happen here. */
ABORT("thr_stksegment failed");
} /* s.ss_sp holds the pointer to the stack bottom. */
GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);
if (!stackbase_main_self && thr_main() != 0)
{ /* Cache the stack bottom pointer for the primordial thread */ /* (this is done during GC_init, so there is no race). */
stackbase_main_ss_sp = s.ss_sp;
stackbase_main_self = self;
}
#ifndef HAVE_GET_STACK_BASE # ifdef NEED_FIND_LIMIT /* Retrieve the stack bottom. */ /* Using the GC_find_limit version is risky. */ /* On IA64, for example, there is no guard page between the */ /* stack of one thread and the register backing store of the */ /* next. Thus this is likely to identify way too large a */ /* "stack" and thus at least result in disastrous performance. */ /* TODO: Implement better strategies here. */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
IF_CANCEL(int cancel_state;)
DCL_LOCK_STATE;
#ifndef GET_MAIN_STACKBASE_SPECIAL /* This is always called from the main thread. Default implementation. */
ptr_t GC_get_main_stack_base(void)
{ struct GC_stack_base sb;
/* Register static data segment(s) as roots.  If more data segments are */
/* added later then they need to be registered at that point (as we do  */
/* with SunOS dynamic loading), or GC_mark_roots needs to check for     */
/* them (as we do with PCR).  Called with allocator lock held.          */
# ifdef OS2

void GC_register_data_segments(void)
{
    PTIB ptib;
    PPIB ppib;
    HMODULE module_handle;
#   define PBUFSIZ 512
    UCHAR path[PBUFSIZ];
    FILE * myexefile;
    struct exe_hdr hdrdos;      /* MSDOS header.        */
    struct e32_exe hdr386;      /* Real header for my executable */
    struct o32_obj seg;         /* Current segment */
    int nsegs;

#   if defined(CPPCHECK)
      hdrdos.padding[0] = 0; /* to prevent "field unused" warnings */
      hdr386.exe_format_level = 0;
      hdr386.os = 0;
      hdr386.padding1[0] = 0;
      hdr386.padding2[0] = 0;
      seg.pagemap = 0;
      seg.mapsize = 0;
      seg.reserved = 0;
#   endif
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        ABORT("DosGetInfoBlocks failed");
    }
    module_handle = ppib -> pib_hmte;
    if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
        ABORT("DosQueryModuleName failed");
    }
    myexefile = fopen(path, "rb");
    if (myexefile == 0) {
        ABORT_ARG1("Failed to open executable", ": %s", path);
    }
    if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
          < sizeof(hdrdos)) {
        ABORT_ARG1("Could not read MSDOS header", " from: %s", path);
    }
    if (E_MAGIC(hdrdos) != EMAGIC) {
        ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
    }
    /* Report the actual failure here (a seek error, not a bad magic).  */
    if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
        ABORT_ARG1("Seek to new header failed", " in file: %s", path);
    }
    if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
          < sizeof(hdr386)) {
        ABORT_ARG1("Could not read OS/2 header", " from: %s", path);
    }
    if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
        ABORT_ARG1("Bad OS/2 magic number", " in file: %s", path);
    }
    if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
        ABORT_ARG1("Bad byte order in executable", " file: %s", path);
    }
    if (E32_CPU(hdr386) == E32CPU286) {
        ABORT_ARG1("GC cannot handle 80286 executables", ": %s", path);
    }
    if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
              SEEK_SET) != 0) {
        ABORT_ARG1("Seek to object table failed", " in file: %s", path);
    }
    for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
      int flags;

      if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
        ABORT_ARG1("Could not read obj table entry", " from file: %s", path);
      }
      flags = O32_FLAGS(seg);
      /* Only readable+writable segments are candidate static-data      */
      /* roots; everything else is skipped.                             */
      if (!(flags & OBJWRITE)) continue;
      if (!(flags & OBJREAD)) continue;
      if (flags & OBJINVALID) {
          GC_err_printf("Object with invalid pages?\n");
          continue;
      }
      GC_add_roots_inner((ptr_t)O32_BASE(seg),
                         (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
    }
    (void)fclose(myexefile);
}
/* Since we can't easily check whether ULONG_PTR and SIZE_T are */ /* defined in Win32 basetsd.h, we define own ULONG_PTR. */ # define GC_ULONG_PTR word
# ifdef MSWIN32 /* Unfortunately, we have to handle win32s very differently from NT, */ /* Since VirtualQuery has very different semantics. In particular, */ /* under win32s a VirtualQuery call on an unmapped page returns an */ /* invalid result. Under NT, GC_register_data_segments is a no-op */ /* and all real work is done by GC_register_dynamic_libraries. Under */ /* win32s, we cannot find the data segments associated with dll's. */ /* We register the main data segment here. */
GC_INNER GC_bool GC_no_win32_dlls = FALSE; /* This used to be set for gcc, to avoid dealing with */ /* the structured exception handling issues. But we now have */ /* assembly code to do that right. */
GC_INNER GC_bool GC_wnt = FALSE; /* This is a Windows NT derivative, i.e. NT, Win2K, XP or later. */
/* Detect the Windows flavor (NT-derived vs win32s) and set the         */
/* GC_wnt / GC_no_win32_dlls flags accordingly.                         */
GC_INNER void GC_init_win32(void)
{
#   if defined(_WIN64) || (defined(_MSC_VER) && _MSC_VER >= 1800)
      /* MS Visual Studio 2013 deprecates GetVersion, but on the other  */
      /* hand it cannot be used to target pre-Win2K.                    */
      GC_wnt = TRUE;
#   else
      /* Set GC_wnt.  If we're running under win32s, assume that no     */
      /* DLLs will be loaded.  I doubt anyone still runs win32s, but... */
      DWORD v = GetVersion();

      GC_wnt = !(v & 0x80000000);
      GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
#   endif
#   ifdef USE_MUNMAP
      if (GC_no_win32_dlls) {
        /* Turn off unmapping for safety (since may not work well with  */
        /* GlobalAlloc).                                                */
        GC_unmap_threshold = 0;
      }
#   endif
}
/* Return the smallest address a such that VirtualQuery */ /* returns correct results for all addresses between a and start. */ /* Assumes VirtualQuery returns correct information for start. */ STATIC ptr_t GC_least_described_address(ptr_t start)
{
MEMORY_BASIC_INFORMATION buf;
LPVOID limit = GC_sysinfo.lpMinimumApplicationAddress;
ptr_t p = (ptr_t)((word)start & ~(GC_page_size - 1));
if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
result = VirtualQuery(q, &buf, sizeof(buf)); if (result != sizeof(buf) || buf.AllocationBase == 0) break;
p = (ptr_t)(buf.AllocationBase);
} return p;
} # endif /* MSWIN32 */
# ifdefined(USE_WINALLOC) && !defined(REDIRECT_MALLOC) /* We maintain a linked list of AllocationBase values that we know */ /* correspond to malloc heap sections. Currently this is only called */ /* during a GC. But there is some hope that for long running */ /* programs we will eventually see most heap sections. */
/* In the long run, it would be more reliable to occasionally walk */ /* the malloc heap with HeapWalk on the default heap. But that */ /* apparently works only for NT-based Windows. */
/* In the long run, a better data structure would also be nice ... */ STATICstruct GC_malloc_heap_list { void * allocation_base; struct GC_malloc_heap_list *next;
} *GC_malloc_heap_l = 0;
/* Is p the base of one of the malloc heap sections we already know */ /* about? */ STATIC GC_bool GC_is_malloc_heap_base(void *p)
{ struct GC_malloc_heap_list *q = GC_malloc_heap_l;
while (0 != q) { if (q -> allocation_base == p) returnTRUE;
q = q -> next;
} returnFALSE;
}
/* NOTE(review): the following German text is a website disclaimer that
   leaked into the file during extraction; it is not part of the original
   source.  Wrapped in a comment so it no longer breaks compilation:
   Die Informationen auf dieser Webseite wurden
   nach bestem Wissen sorgfältig zusammengestellt.  Es wird jedoch weder
   Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
   Informationen zugesichert.
   Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell. */