/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved. * Copyright (c) 2008-2021 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice.
*/
/* Platform-specific system headers and thread-lock definitions.        */
#ifdef GC_SOLARIS_THREADS
# include <sys/syscall.h>
#endif
#if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(SYMBIAN) \
    || (defined(CONSOLE_LOG) && defined(MSWIN32))
# include <fcntl.h>
# include <sys/types.h>
# include <sys/stat.h>
#endif
#if defined(CONSOLE_LOG) && defined(MSWIN32) && !defined(__GNUC__)
# include <io.h>
#endif

#ifdef NONSTOP
# include <floss.h>
#endif

#ifdef THREADS
# ifdef PCR
#   include "il/PCR_IL.h"
    GC_INNER PCR_Th_ML GC_allocate_ml;
# elif defined(SN_TARGET_PSP2)
    GC_INNER WapiMutex GC_allocate_ml_PSP2 = { 0, NULL };
# elif defined(GC_DEFN_ALLOCATE_ML) || defined(SN_TARGET_PS3)
#   include <pthread.h>
    GC_INNER pthread_mutex_t GC_allocate_ml;
# endif
  /* For other platforms with threads, the lock and possibly            */
  /* GC_lock_holder variables are defined in the thread support code.   */
#endif /* THREADS */

#ifdef DYNAMIC_LOADING
  /* We need to register the main data segment.  Returns TRUE unless    */
  /* this is done implicitly as part of dynamic library registration.   */
# define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
#elif defined(GC_DONT_REGISTER_MAIN_STATIC_DATA)
# define GC_REGISTER_MAIN_STATIC_DATA() FALSE
#else
  /* Don't unnecessarily call GC_register_main_static_data() in case    */
  /* dyn_load.c isn't linked in.                                        */
# define GC_REGISTER_MAIN_STATIC_DATA() TRUE
#endif
/* The hook invoked when the collector cannot satisfy an allocation     */
/* (initially the library default handler).                             */
/* All accesses to it should be synchronized to avoid data races.       */
GC_oom_func GC_oom_fn = GC_default_oom_fn;
#ifdef CAN_HANDLE_FORK # ifdef HANDLE_FORK
GC_INNER int GC_handle_fork = 1; /* The value is examined by GC_thr_init. */ # else
GC_INNER int GC_handle_fork = FALSE; # endif
/* Overrides the default automatic handle-fork mode. Has effect only */ /* if called before GC_INIT. */
GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED)
{ # ifdef CAN_HANDLE_FORK if (!GC_is_initialized)
GC_handle_fork = value >= -1 ? value : 1; /* Map all negative values except for -1 to a positive one. */ # elif defined(THREADS) || (defined(DARWIN) && defined(MPROTECT_VDB)) if (!GC_is_initialized && value) { # ifndef SMALL_CONFIG
GC_init(); /* to initialize GC_manual_vdb and GC_stderr */ # ifndef THREADS if (GC_manual_vdb) return; # endif # endif
ABORT("fork() handling unsupported");
} # else /* No at-fork handler is needed in the single-threaded mode. */ # endif
}
/* Set things up so that GC_size_map[i] >= granules(i), */ /* but not too much bigger */ /* and so that size_map contains relatively few distinct entries */ /* This was originally stolen from Russ Atkinson's Cedar */ /* quantization algorithm (but we precompute it). */ STATICvoid GC_init_size_map(void)
{
size_t i;
/* Map size 0 to something bigger. */ /* This avoids problems at lower levels. */
GC_size_map[0] = 1; for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
GC_size_map[i] = ROUNDED_UP_GRANULES(i); # ifndef _MSC_VER
GC_ASSERT(GC_size_map[i] < TINY_FREELISTS); /* Seems to tickle bug in VC++ 2008 for AMD64 */ # endif
} /* We leave the rest of the array to be filled in on demand. */
}
/*
 * The following is a gross hack to deal with a problem that can occur
 * on machines that are sloppy about stack frame sizes, notably SPARC.
 * Bogus pointers may be written to the stack and not cleared for
 * a LONG time, because they always fall into holes in stack frames
 * that are not written.  We partially address this by clearing
 * sections of the stack whenever we get control.
 */

#ifndef SMALL_CLEAR_SIZE
# define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */
#endif

#ifdef THREADS
# define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */
#else
  /* Single-threaded case: track where the stack was last cleared so    */
  /* GC_clear_stack can clear incrementally between collections.        */
  STATIC word GC_stack_last_cleared = 0; /* GC_no when we last did this */
  STATIC ptr_t GC_min_sp = NULL;
                        /* Coolest stack pointer value from which       */
                        /* we've already cleared the stack.             */
  STATIC ptr_t GC_high_water = NULL;
                        /* "hottest" stack pointer value we have seen   */
                        /* recently.  Degrades over time.               */
  STATIC word GC_bytes_allocd_at_reset = 0;
# define DEGRADE_RATE 50
#endif
# ifdefined(ASM_CLEAR_CODE) void *GC_clear_stack_inner(void *, ptr_t); # else /* Clear the stack up to about limit. Return arg. This function */ /* is not static because it could also be erroneously defined in .S */ /* file, so this error would be caught by the linker. */ void *GC_clear_stack_inner(void *arg, # ifdefined(__APPLE_CC__) && !GC_CLANG_PREREQ(6, 0) volatile/* to workaround some bug */ # endif
ptr_t limit)
{ # define CLEAR_SIZE 213 /* granularity */ volatile word dummy[CLEAR_SIZE];
BZERO((/* no volatile */ void *)dummy, sizeof(dummy)); if ((word)GC_approx_sp() COOLER_THAN (word)limit) {
(void)GC_clear_stack_inner(arg, limit);
} /* Make sure the recursive call is not a tail call, and the bzero */ /* call is not recognized as dead code. */ # ifdefined(CPPCHECK)
GC_noop1(dummy[0]); # else
GC_noop1(COVERT_DATAFLOW(dummy)); # endif return(arg);
} # endif /* !ASM_CLEAR_CODE */
# ifdef THREADS
    /* Used to occasionally clear a bigger chunk. */
    /* TODO: Should be more random than it is ... */
    GC_ATTR_NO_SANITIZE_THREAD
    static unsigned next_random_no(void)
    {
      /* Simple modular counter; returns 0 once every 13 calls.         */
      static unsigned random_no = 0;

      return ++random_no % 13;
    }
# endif /* THREADS */
/* Clear some of the inaccessible part of the stack.  Returns its       */
/* argument, so it can be used in a tail call position, hence clearing  */
/* another frame.                                                       */
GC_API void * GC_CALL GC_clear_stack(void *arg)
{
    ptr_t sp = GC_approx_sp();  /* Hotter than actual sp */
#   ifdef THREADS
        word volatile dummy[SMALL_CLEAR_SIZE];
#   endif

#   define SLOP 400
        /* Extra bytes we clear every time.  This clears our own        */
        /* activation record, and should cause more frequent            */
        /* clearing near the cold end of the stack, a good thing.       */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really saw    */
        /* it, to cover for GC noise etc. above our current frame.      */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of     */
        /* allocation.  Otherwise very heavily recursive programs       */
        /* with sparse stacks may result in heaps that grow almost      */
        /* without bounds.  As the heap gets larger, collection         */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets        */
        /* larger ...                                                   */
#   ifdef THREADS
      /* Threaded case: every 13th call clears a big chunk below sp;    */
      /* otherwise just zero a small local buffer.                      */
      if (next_random_no() == 0) {
        ptr_t limit = sp;

        MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
        limit = (ptr_t)((word)limit & ~0xf);
                        /* Make it sufficiently aligned for assembly    */
                        /* implementations of GC_clear_stack_inner.     */
        return GC_clear_stack_inner(arg, limit);
      }
      BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
#   else
      /* Single-threaded case: clear incrementally between GC_min_sp    */
      /* and the current stack pointer, resetting state each GC cycle.  */
      if (GC_gc_no > GC_stack_last_cleared) {
        /* Start things over, so we clear the entire stack again */
        if (GC_stack_last_cleared == 0)
          GC_high_water = (ptr_t)GC_stackbottom;
        GC_min_sp = GC_high_water;
        GC_stack_last_cleared = GC_gc_no;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
      /* Adjust GC_high_water */
      MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
      if ((word)sp HOTTER_THAN (word)GC_high_water) {
        GC_high_water = sp;
      }
      MAKE_HOTTER(GC_high_water, GC_SLOP);
      {
        ptr_t limit = GC_min_sp;

        MAKE_HOTTER(limit, SLOP);
        /* Only clear if sp has moved below the cleared watermark.      */
        if ((word)sp COOLER_THAN (word)limit) {
          limit = (ptr_t)((word)limit & ~0xf);
                          /* Make it sufficiently aligned for assembly  */
                          /* implementations of GC_clear_stack_inner.   */
          GC_min_sp = sp;
          return GC_clear_stack_inner(arg, limit);
        }
      }
      if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
        /* Restart clearing process, but limit how much clearing we do. */
        GC_min_sp = sp;
        MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
        if ((word)GC_min_sp HOTTER_THAN (word)GC_high_water)
          GC_min_sp = GC_high_water;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
#   endif
    return arg;
}
/* Return a pointer to the base address of p, given a pointer to a      */
/* an address within an object.  Return 0 o.w.                          */
GC_API void * GC_CALL GC_base(void * p)
{
    ptr_t r;
    struct hblk *h;
    bottom_index *bi;
    hdr *candidate_hdr;

    r = (ptr_t)p;
    if (!EXPECT(GC_is_initialized, TRUE)) return 0;
    /* Look up the header for the heap block containing p.              */
    h = HBLKPTR(r);
    GET_BI(r, bi);
    candidate_hdr = HDR_FROM_BI(bi, r);
    if (candidate_hdr == 0) return(0);
    /* If it's a pointer to the middle of a large object, move it       */
    /* to the beginning.                                                */
    while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
        h = FORWARDED_ADDR(h,candidate_hdr);
        r = (ptr_t)h;
        candidate_hdr = HDR(h);
    }
    if (HBLK_IS_FREE(candidate_hdr)) return(0);
    /* Make sure r points to the beginning of the object */
    r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
    {
        /* Round r down to the start of the object it falls into, using */
        /* the displacement within the block modulo the object size.    */
        size_t offset = HBLKDISPL(r);
        word sz = candidate_hdr -> hb_sz;
        size_t obj_displ = offset % sz;
        ptr_t limit;

        r -= obj_displ;
        limit = r + sz;
        /* Reject if a small object (sz <= HBLKSIZE) would extend past  */
        /* its heap block -- r cannot be a valid base in that case.     */
        if ((word)limit > (word)(h + 1) && sz <= HBLKSIZE) {
            return(0);
        }
        if ((word)p >= (word)limit) return(0);
    }
    return((void *)r);
}
/* Return TRUE if and only if p points to somewhere in GC heap. */
GC_API int GC_CALL GC_is_heap_ptr(constvoid *p)
{
bottom_index *bi;
/* Return the size of an object, given a pointer to its base.           */
/* (For small objects this also happens to work from interior pointers, */
/* but that shouldn't be relied upon.)                                  */
GC_API size_t GC_CALL GC_size(const void * p)
{
    /* Fetch the heap-block header for p and report its stored size.    */
    hdr * hhdr = HDR(p);

    return (size_t)hhdr->hb_sz;
}
/* These getters remain unsynchronized for compatibility (since some    */
/* clients could call some of them from a GC callback holding the       */
/* allocator lock).                                                     */
GC_API size_t GC_CALL GC_get_heap_size(void)
{
    /* ignore the memory space returned to OS (i.e. count only the      */
    /* space owned by the garbage collector)                            */
    return (size_t)(GC_heapsize - GC_unmapped_bytes);
}
/* Total bytes on GC free lists, excluding space unmapped back to OS.   */
GC_API size_t GC_CALL GC_get_free_bytes(void)
{
    /* ignore the memory space returned to OS */
    return (size_t)(GC_large_free_bytes - GC_unmapped_bytes);
}
/* Return the heap usage information.  This is a thread-safe (atomic)   */
/* alternative for the five above getters.  NULL pointer is allowed for */
/* any argument.  Returned (filled in) values are of word type.         */
GC_API void GC_CALL GC_get_heap_usage_safe(GC_word *pheap_size,
                        GC_word *pfree_bytes, GC_word *punmapped_bytes,
                        GC_word *pbytes_since_gc, GC_word *ptotal_bytes)
{
  DCL_LOCK_STATE;

  /* All counters are read while holding the allocator lock so that     */
  /* the returned values are mutually consistent.                       */
  LOCK();
  if (pheap_size != NULL)
    *pheap_size = GC_heapsize - GC_unmapped_bytes;
  if (pfree_bytes != NULL)
    *pfree_bytes = GC_large_free_bytes - GC_unmapped_bytes;
  if (punmapped_bytes != NULL)
    *punmapped_bytes = GC_unmapped_bytes;
  if (pbytes_since_gc != NULL)
    *pbytes_since_gc = GC_bytes_allocd;
  if (ptotal_bytes != NULL)
    *ptotal_bytes = GC_bytes_allocd + GC_bytes_allocd_before_gc;
  UNLOCK();
}
GC_INNER word GC_reclaimed_bytes_before_gc = 0;
/* Fill in GC statistics provided the destination is of enough size. */ staticvoid fill_prof_stats(struct GC_prof_stats_s *pstats)
{
pstats->heapsize_full = GC_heapsize;
pstats->free_bytes_full = GC_large_free_bytes;
pstats->unmapped_bytes = GC_unmapped_bytes;
pstats->bytes_allocd_since_gc = GC_bytes_allocd;
pstats->allocd_bytes_before_gc = GC_bytes_allocd_before_gc;
pstats->non_gc_bytes = GC_non_gc_bytes;
pstats->gc_no = GC_gc_no; /* could be -1 */ # ifdef PARALLEL_MARK
pstats->markers_m1 = (word)((signed_word)GC_markers_m1); # else
pstats->markers_m1 = 0; /* one marker */ # endif
pstats->bytes_reclaimed_since_gc = GC_bytes_found > 0 ?
(word)GC_bytes_found : 0;
pstats->reclaimed_bytes_before_gc = GC_reclaimed_bytes_before_gc;
pstats->expl_freed_bytes_since_gc = GC_bytes_freed; /* since gc-7.7 */
pstats->obtained_from_os_bytes = GC_our_mem_bytes; /* since gc-8.2 */
}
#ifdef GC_READ_ENV_FILE /* This works for Win32/WinCE for now. Really useful only for WinCE. */ STATICchar *GC_envfile_content = NULL; /* The content of the GC "env" file with CR and */ /* LF replaced to '\0'. NULL if the file is */ /* missing or empty. Otherwise, always ends */ /* with '\0'. */ STATICunsigned GC_envfile_length = 0; /* Length of GC_envfile_content (if non-NULL). */
len = (unsigned)GetModuleFileName(NULL /* hModule */, path,
_MAX_PATH + 1); /* If GetModuleFileName() has failed then len is 0. */ if (len > 4 && path[len - 4] == (TCHAR)'.') {
len -= 4; /* strip executable file extension */
}
BCOPY(TEXT(GC_ENV_FILE_EXT), &path[len], sizeof(TEXT(GC_ENV_FILE_EXT)));
hFile = CreateFile(path, GENERIC_READ,
FILE_SHARE_READ | FILE_SHARE_WRITE,
NULL /* lpSecurityAttributes */, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL, NULL /* hTemplateFile */); if (hFile == INVALID_HANDLE_VALUE) return; /* the file is absent or the operation is failed */
len = (unsigned)GetFileSize(hFile, NULL); if (len <= 1 || len >= GC_ENVFILE_MAXLEN) {
CloseHandle(hFile); return; /* invalid file length - ignoring the file content */
} /* At this execution point, GC_setpagesize() and GC_init_win32() */ /* must already be called (for GET_MEM() to work correctly). */
GC_ASSERT(GC_page_size != 0);
bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP((size_t)len + 1);
content = (char *)GET_MEM(bytes_to_get); if (content == NULL) {
CloseHandle(hFile); return; /* allocation failure */
}
GC_add_to_our_memory(content, bytes_to_get);
ofs = 0;
nBytesRead = (DWORD)-1L; /* Last ReadFile() call should clear nBytesRead on success. */ while (ReadFile(hFile, content + ofs, len - ofs + 1, &nBytesRead,
NULL /* lpOverlapped */) && nBytesRead != 0) { if ((ofs += nBytesRead) > len) break;
}
CloseHandle(hFile); if (ofs != len || nBytesRead != 0) { /* TODO: recycle content */ return; /* read operation is failed - ignoring the file content */
}
content[ofs] = '\0'; while (ofs-- > 0) { if (content[ofs] == '\r' || content[ofs] == '\n')
content[ofs] = '\0';
}
GC_ASSERT(NULL == GC_envfile_content);
GC_envfile_length = len + 1;
GC_envfile_content = content; # endif
}
/* This routine scans GC_envfile_content for the specified */ /* environment variable (and returns its value if found). */
GC_INNER char * GC_envfile_getenv(constchar *name)
{ char *p; char *end_of_content;
size_t namelen;
# ifndef NO_GETENV
p = getenv(name); /* try the standard getenv() first */ if (p != NULL) return *p != '\0' ? p : NULL; # endif
p = GC_envfile_content; if (p == NULL) return NULL; /* "env" file is absent (or empty) */
namelen = strlen(name); if (namelen == 0) /* a sanity check */ return NULL; for (end_of_content = p + GC_envfile_length;
p != end_of_content; p += strlen(p) + 1) { if (strncmp(p, name, namelen) == 0 && *(p += namelen) == '=') {
p++; /* the match is found; skip '=' */ return *p != '\0' ? p : NULL;
} /* If not matching then skip to the next line. */
} return NULL; /* no match found */
} #endif/* GC_READ_ENV_FILE */
/* TRUE once the collector has completed its initialization.            */
GC_INNER GC_bool GC_is_initialized = FALSE;

/* Report to the client whether GC initialization has been performed.   */
GC_API int GC_CALL GC_is_init_called(void)
{
  return GC_is_initialized;
}
#ifndef DONT_USE_ATEXIT # if !defined(PCR) && !defined(SMALL_CONFIG) /* A dedicated variable to avoid a garbage collection on abort. */ /* GC_find_leak cannot be used for this purpose as otherwise */ /* TSan finds a data race (between GC_default_on_abort and, e.g., */ /* GC_finish_collection). */ static GC_bool skip_gc_atexit = FALSE; # else # define skip_gc_atexit FALSE # endif
STATICvoid GC_exit_check(void)
{ if (GC_find_leak && !skip_gc_atexit) { # ifdef THREADS
GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
GC_gcollect();
GC_in_thread_creation = FALSE; # else
GC_gcollect(); # endif
}
} #endif
#ifdefined(UNIX_LIKE) && !defined(NO_DEBUGGING) staticvoid looping_handler(int sig)
{
GC_err_printf("Caught signal %d: looping in handler\n", sig); for (;;) { /* empty */
}
}
static GC_bool installed_looping_handler = FALSE;
staticvoid maybe_install_looping_handler(void)
{ /* Install looping handler before the write fault handler, so we */ /* handle write faults correctly. */ if (!installed_looping_handler && 0 != GETENV("GC_LOOP_ON_ABORT")) {
GC_set_and_save_fault_handler(looping_handler);
installed_looping_handler = TRUE;
}
}
DISABLE_CANCEL(cancel_state); /* Note that although we are nominally called with the */ /* allocation lock held, the allocation lock is now */ /* only really acquired once a second thread is forked.*/ /* And the initialization code needs to run before */ /* then. Thus we really don't hold any locks, and can */ /* in fact safely initialize them here. */ # ifdef THREADS # ifndef GC_ALWAYS_MULTITHREADED
GC_ASSERT(!GC_need_to_lock); # endif # ifdef SN_TARGET_PS3
{
pthread_mutexattr_t mattr;
if (0 != pthread_mutexattr_init(&mattr)) {
ABORT("pthread_mutexattr_init failed");
} if (0 != pthread_mutex_init(&GC_allocate_ml, &mattr)) {
ABORT("pthread_mutex_init failed");
}
(void)pthread_mutexattr_destroy(&mattr);
} # endif # endif /* THREADS */ # ifdefined(GC_WIN32_THREADS) && !defined(GC_PTHREADS) # ifndef SPIN_COUNT # define SPIN_COUNT 4000 # endif # ifdef MSWINRT_FLAVOR
InitializeCriticalSectionAndSpinCount(&GC_allocate_ml, SPIN_COUNT); # else
{ # ifndef MSWINCE
FARPROC pfn = 0;
HMODULE hK32 = GetModuleHandle(TEXT("kernel32.dll")); if (hK32)
pfn = GetProcAddress(hK32, "InitializeCriticalSectionAndSpinCount"); if (pfn) {
(*(BOOL (WINAPI *)(LPCRITICAL_SECTION, DWORD))(word)pfn)(
&GC_allocate_ml, SPIN_COUNT);
} else # endif /* !MSWINCE */ /* else */ InitializeCriticalSection(&GC_allocate_ml);
} # endif # endif /* GC_WIN32_THREADS && !GC_PTHREADS */ # ifdefined(GC_WIN32_THREADS) \
&& ((defined(MSWIN32) && !defined(CONSOLE_LOG)) || defined(MSWINCE))
InitializeCriticalSection(&GC_write_cs); # endif
GC_setpagesize(); # ifdef MSWIN32
GC_init_win32(); # endif # ifdef GC_READ_ENV_FILE
GC_envfile_init(); # endif # if !defined(NO_CLOCK) || !defined(SMALL_CONFIG) # ifdef GC_PRINT_VERBOSE_STATS /* This is useful for debugging and profiling on platforms with */ /* missing getenv() (like WinCE). */
GC_print_stats = VERBOSE; # else if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
GC_print_stats = VERBOSE;
} elseif (0 != GETENV("GC_PRINT_STATS")) {
GC_print_stats = 1;
} # endif # endif # if ((defined(UNIX_LIKE) && !defined(GC_ANDROID_LOG)) \
|| (defined(CONSOLE_LOG) && defined(MSWIN32)) \
|| defined(CYGWIN32) || defined(SYMBIAN)) && !defined(SMALL_CONFIG)
{ char * file_name = TRUSTED_STRING(GETENV("GC_LOG_FILE")); # ifdef GC_LOG_TO_FILE_ALWAYS if (NULL == file_name)
file_name = GC_LOG_STD_NAME; # else if (0 != file_name) # endif
{ # ifdefined(_MSC_VER) int log_d = _open(file_name, O_CREAT | O_WRONLY | O_APPEND); # else int log_d = open(file_name, O_CREAT | O_WRONLY | O_APPEND, 0644); # endif if (log_d < 0) {
GC_err_printf("Failed to open %s as log file\n", file_name);
} else { char *str;
GC_log = log_d;
str = GETENV("GC_ONLY_LOG_TO_FILE"); # ifdef GC_ONLY_LOG_TO_FILE /* The similar environment variable set to "0" */ /* overrides the effect of the macro defined. */ if (str != NULL && *str == '0' && *(str + 1) == '\0') # else /* Otherwise setting the environment variable */ /* to anything other than "0" will prevent from */ /* redirecting stdout/err to the log file. */ if (str == NULL || (*str == '0' && *(str + 1) == '\0')) # endif
{
GC_stdout = log_d;
GC_stderr = log_d;
}
}
}
} # endif # if !defined(NO_DEBUGGING) && !defined(GC_DUMP_REGULARLY) if (0 != GETENV("GC_DUMP_REGULARLY")) {
GC_dump_regularly = TRUE;
} # endif # ifdef KEEP_BACK_PTRS
{ char * backtraces_string = GETENV("GC_BACKTRACES"); if (0 != backtraces_string) {
GC_backtraces = atol(backtraces_string); if (backtraces_string[0] == '\0') GC_backtraces = 1;
}
} # endif if (0 != GETENV("GC_FIND_LEAK")) {
GC_find_leak = 1;
} # ifndef SHORT_DBG_HDRS if (0 != GETENV("GC_FINDLEAK_DELAY_FREE")) {
GC_findleak_delay_free = TRUE;
} # endif if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
GC_all_interior_pointers = 1;
} if (0 != GETENV("GC_DONT_GC")) { # ifdef LINT2
GC_disable(); # else
GC_dont_gc = 1; # endif
} if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
GC_print_back_height = TRUE;
} if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
GC_large_alloc_warn_interval = LONG_MAX;
}
{ char * addr_string = GETENV("GC_TRACE"); if (0 != addr_string) { # ifndef ENABLE_TRACE
WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0); # else
word addr = (word)STRTOULL(addr_string, NULL, 16); if (addr < 0x1000)
WARN("Unlikely trace address: %p\n", (void *)addr);
GC_trace_addr = (ptr_t)addr; # endif
}
} # ifdef GC_COLLECT_AT_MALLOC
{ char * string = GETENV("GC_COLLECT_AT_MALLOC"); if (0 != string) {
size_t min_lb = (size_t)STRTOULL(string, NULL, 10); if (min_lb > 0)
GC_dbg_collect_at_malloc_min_lb = min_lb;
}
} # endif # if !defined(GC_DISABLE_INCREMENTAL) && !defined(NO_CLOCK)
{ char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET"); if (0 != time_limit_string) { long time_limit = atol(time_limit_string); if (time_limit > 0) {
GC_time_limit = time_limit;
}
}
} # endif # ifndef SMALL_CONFIG
{ char * full_freq_string = GETENV("GC_FULL_FREQUENCY"); if (full_freq_string != NULL) { int full_freq = atoi(full_freq_string); if (full_freq > 0)
GC_full_freq = full_freq;
}
} # endif
{ char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL"); if (0 != interval_string) { long interval = atol(interval_string); if (interval <= 0) {
WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has " "bad value: Ignoring\n", 0);
} else {
GC_large_alloc_warn_interval = interval;
}
}
}
{ char * space_divisor_string = GETENV("GC_FREE_SPACE_DIVISOR"); if (space_divisor_string != NULL) { int space_divisor = atoi(space_divisor_string); if (space_divisor > 0)
GC_free_space_divisor = (unsigned)space_divisor;
}
} # ifdef USE_MUNMAP
{ char * string = GETENV("GC_UNMAP_THRESHOLD"); if (string != NULL) { if (*string == '0' && *(string + 1) == '\0') { /* "0" is used to disable unmapping. */
GC_unmap_threshold = 0;
} else { int unmap_threshold = atoi(string); if (unmap_threshold > 0)
GC_unmap_threshold = unmap_threshold;
}
}
}
{ char * string = GETENV("GC_FORCE_UNMAP_ON_GCOLLECT"); if (string != NULL) { if (*string == '0' && *(string + 1) == '\0') { /* "0" is used to turn off the mode. */
GC_force_unmap_on_gcollect = FALSE;
} else {
GC_force_unmap_on_gcollect = TRUE;
}
}
}
{ char * string = GETENV("GC_USE_ENTIRE_HEAP"); if (string != NULL) { if (*string == '0' && *(string + 1) == '\0') { /* "0" is used to turn off the mode. */
GC_use_entire_heap = FALSE;
} else {
GC_use_entire_heap = TRUE;
}
}
} # endif # if !defined(NO_DEBUGGING) && !defined(NO_CLOCK)
GET_TIME(GC_init_time); # endif
maybe_install_looping_handler(); # if ALIGNMENT > GC_DS_TAGS /* Adjust normal object descriptor for extra allocation. */ if (EXTRA_BYTES != 0)
GC_obj_kinds[NORMAL].ok_descriptor = (word)(-ALIGNMENT) | GC_DS_LENGTH; # endif
GC_exclude_static_roots_inner(beginGC_arrays, endGC_arrays);
GC_exclude_static_roots_inner(beginGC_obj_kinds, endGC_obj_kinds); # ifdef SEPARATE_GLOBALS
GC_exclude_static_roots_inner(beginGC_objfreelist, endGC_objfreelist);
GC_exclude_static_roots_inner(beginGC_aobjfreelist, endGC_aobjfreelist); # endif # ifdefined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0); /* If thread stacks are cached, they tend to be scanned in */ /* entirety as part of the root set. This will grow them to */ /* maximum size, and is generally not desirable. */ # endif # if !defined(THREADS) || defined(GC_PTHREADS) \
|| defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
|| defined(GC_WIN32_THREADS) || defined(GC_SOLARIS_THREADS) if (GC_stackbottom == 0) {
GC_stackbottom = GC_get_main_stack_base(); # if (defined(LINUX) || defined(HPUX)) && defined(IA64)
GC_register_stackbottom = GC_get_register_stack_base(); # endif
} else { # if (defined(LINUX) || defined(HPUX)) && defined(IA64) if (GC_register_stackbottom == 0) {
WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0); /* The following may fail, since we may rely on */ /* alignment properties that may not hold with a user set */ /* GC_stackbottom. */
GC_register_stackbottom = GC_get_register_stack_base();
} # endif
} # endif # if !defined(CPPCHECK)
GC_STATIC_ASSERT(sizeof(ptr_t) == sizeof(word));
GC_STATIC_ASSERT(sizeof(signed_word) == sizeof(word)); # if !defined(_AUX_SOURCE) || defined(__GNUC__)
GC_STATIC_ASSERT((word)(-1) > (word)0); /* word should be unsigned */ # endif /* We no longer check for ((void*)(-1) > NULL) since all pointers */ /* are explicitly cast to word in every less/greater comparison. */
GC_STATIC_ASSERT((signed_word)(-1) < (signed_word)0); # endif
GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE); # ifndef THREADS
GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp())); # endif
GC_init_headers(); # ifdefined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED) \
&& (defined(SEARCH_FOR_DATA_START) || defined(NETBSD))
LOCK(); /* just to set GC_lock_holder */ # endif # ifdef SEARCH_FOR_DATA_START /* For MPROTECT_VDB, the temporary fault handler should be */ /* installed first, before the write fault one in GC_dirty_init. */ if (GC_REGISTER_MAIN_STATIC_DATA()) GC_init_linux_data_start(); # elif defined(NETBSD) && defined(__ELF__) if (GC_REGISTER_MAIN_STATIC_DATA()) GC_init_netbsd_elf(); # endif # ifdefined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED) \
&& (defined(SEARCH_FOR_DATA_START) || defined(NETBSD))
UNLOCK(); # endif # ifndef GC_DISABLE_INCREMENTAL if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) { # ifdefined(BASE_ATOMIC_OPS_EMULATED) || defined(CHECKSUMS) \
|| defined(REDIRECT_MALLOC) || defined(REDIRECT_MALLOC_IN_HEADER) \
|| defined(SMALL_CONFIG) /* TODO: Implement CHECKSUMS for manual VDB. */ # else if (manual_vdb_allowed) {
GC_manual_vdb = TRUE;
GC_incremental = TRUE;
} else # endif /* else */ { /* For GWW_VDB on Win32, this needs to happen before any */ /* heap memory is allocated. */
GC_incremental = GC_dirty_init();
GC_ASSERT(GC_bytes_allocd == 0);
}
} # endif
/* Add initial guess of root sets. Do this first, since sbrk(0) */ /* might be used. */ if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
GC_bl_init();
GC_mark_init();
{ char * sz_str = GETENV("GC_INITIAL_HEAP_SIZE"); if (sz_str != NULL) {
initial_heap_sz = GC_parse_mem_size_arg(sz_str); if (initial_heap_sz <= MINHINCR * HBLKSIZE) {
WARN("Bad initial heap size %s - ignoring it.\n", sz_str);
}
}
}
{ char * sz_str = GETENV("GC_MAXIMUM_HEAP_SIZE"); if (sz_str != NULL) {
word max_heap_sz = GC_parse_mem_size_arg(sz_str); if (max_heap_sz < initial_heap_sz) {
WARN("Bad maximum heap size %s - ignoring it.\n", sz_str);
} if (0 == GC_max_retries) GC_max_retries = 2;
GC_set_max_heap_size(max_heap_sz);
}
} # ifdefined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
LOCK(); /* just to set GC_lock_holder */ # endif if (!GC_expand_hp_inner(divHBLKSZ(initial_heap_sz))) {
GC_err_printf("Can't start up: not enough memory\n"); EXIT();
} else {
GC_requested_heapsize += initial_heap_sz;
} if (GC_all_interior_pointers)
GC_initialize_offsets();
GC_register_displacement_inner(0L); # ifdefined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC) if (!GC_all_interior_pointers) { /* TLS ABI uses pointer-sized offsets for dtv. */
GC_register_displacement_inner(sizeof(void *));
} # endif
GC_init_size_map(); # ifdef PCR if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
!= PCR_ERes_okay) {
ABORT("Can't lock load state");
} elseif (PCR_IL_Unlock() != PCR_ERes_okay) {
ABORT("Can't unlock load state");
}
PCR_IL_Unlock();
GC_pcr_install(); # endif
GC_is_initialized = TRUE; # ifdefined(GC_PTHREADS) || defined(GC_WIN32_THREADS) # ifdefined(LINT2) \
&& !(defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED))
LOCK();
GC_thr_init();
UNLOCK(); # else
GC_thr_init(); # endif # endif
COND_DUMP; /* Get black list set up and/or incremental GC started */ if (!GC_dont_precollect || GC_incremental) {
GC_gcollect_inner();
} # ifdefined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
UNLOCK(); # endif # ifdefined(THREADS) && defined(UNIX_LIKE) && !defined(NO_GETCONTEXT) /* Ensure getcontext_works is set to avoid potential data race. */ if (GC_dont_gc || GC_dont_precollect)
GC_with_callee_saves_pushed(callee_saves_pushed_dummy_fn, NULL); # endif # ifndef DONT_USE_ATEXIT if (GC_find_leak) { /* This is to give us at least one chance to detect leaks. */ /* This may report some very benign leaks, but ... */
atexit(GC_exit_check);
} # endif
/* The rest of this again assumes we don't really hold */ /* the allocation lock. */ # ifdefined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) \
|| (defined(GC_ALWAYS_MULTITHREADED) && defined(GC_WIN32_THREADS) \
&& !defined(GC_NO_THREADS_DISCOVERY)) /* Make sure thread local allocation is initialized, in */ /* case we did not get called from GC_init_parallel(). */
GC_init_parallel(); # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
# ifdefined(DYNAMIC_LOADING) && defined(DARWIN) /* This must be called WITHOUT the allocation lock held */ /* and before any threads are created. */
GC_init_dyld(); # endif
RESTORE_CANCEL(cancel_state);
}
/* Turn on incremental (generational) collection mode if possible.      */
GC_API void GC_CALL GC_enable_incremental(void)
{
# if !defined(GC_DISABLE_INCREMENTAL) && !defined(KEEP_BACK_PTRS)
    DCL_LOCK_STATE;

    /* If we are keeping back pointers, the GC itself dirties all       */
    /* pages on which objects have been marked, making                  */
    /* incremental GC pointless.                                        */
    if (!GC_find_leak && 0 == GETENV("GC_DISABLE_INCREMENTAL")) {
      LOCK();
      if (!GC_incremental) {
        GC_setpagesize();
        /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
        maybe_install_looping_handler(); /* Before write fault handler! */
        if (!GC_is_initialized) {
          /* The lock is released across the GC_init() call below and   */
          /* reacquired afterwards.                                     */
          UNLOCK();
          GC_incremental = TRUE; /* indicate intention to turn it on */
          GC_init();
          LOCK();
        } else {
#         if !defined(BASE_ATOMIC_OPS_EMULATED) && !defined(CHECKSUMS) \
             && !defined(REDIRECT_MALLOC) \
             && !defined(REDIRECT_MALLOC_IN_HEADER) && !defined(SMALL_CONFIG)
            if (manual_vdb_allowed) {
              GC_manual_vdb = TRUE;
              GC_incremental = TRUE;
            } else
#         endif
          /* else */ {
            GC_incremental = GC_dirty_init();
          }
        }
        if (GC_incremental && !GC_dont_gc) {
                                /* Can't easily do it if GC_dont_gc.    */
          IF_CANCEL(int cancel_state;)

          DISABLE_CANCEL(cancel_state);
          if (GC_bytes_allocd > 0) {
            /* There may be unmarked reachable objects. */
            GC_gcollect_inner();
          } /* else we're OK in assuming everything's   */
            /* clean since nothing can point to an      */
            /* unmarked object.                         */
          GC_read_dirty(FALSE);
          RESTORE_CANCEL(cancel_state);
        }
      }
      UNLOCK();
      return;
    }
# endif
  GC_init();
}
# ifdef THREADS # ifdefined(PARALLEL_MARK) && !defined(GC_ALWAYS_MULTITHREADED) # define IF_NEED_TO_LOCK(x) if (GC_parallel || GC_need_to_lock) x # else # define IF_NEED_TO_LOCK(x) if (GC_need_to_lock) x # endif # else # define IF_NEED_TO_LOCK(x) # endif /* !THREADS */
# ifdef MSWINRT_FLAVOR # include <windows.storage.h>
/* This API is defined in roapi.h, but we cannot include it here */ /* since it does not compile in C. */
DECLSPEC_IMPORT HRESULT WINAPI RoGetActivationFactory(
HSTRING activatableClassId,
REFIID iid, void** factory);
#ifdefined(DJGPP) || defined(__STRICT_ANSI__) /* vsnprintf is missing in DJGPP (v2.0.3) */ # define GC_VSNPRINTF(buf, bufsz, format, args) vsprintf(buf, format, args) #elifdefined(_MSC_VER) # ifdef MSWINCE /* _vsnprintf is deprecated in WinCE */ # define GC_VSNPRINTF StringCchVPrintfA # else # define GC_VSNPRINTF _vsnprintf # endif #else # define GC_VSNPRINTF vsnprintf #endif
/* A version of printf that is unlikely to call malloc, and is thus safer */
/* to call from the collector in case malloc has been bound to GC_malloc. */
/* Floating point arguments and formats should be avoided, since FP       */
/* conversion is more likely to allocate memory.                          */
/* Assumes that no more than BUFSZ-1 characters are written at once.      */
/* The last byte of buf serves as a guard: if vsnprintf overwrites it,    */
/* we abort rather than continue with a clobbered stack.                  */
#define GC_PRINTF_FILLBUF(buf, format) \
        do { \
          va_list args; \
          va_start(args, format); \
          (buf)[sizeof(buf) - 1] = 0x15; /* guard */ \
          (void)GC_VSNPRINTF(buf, sizeof(buf) - 1, format, args); \
          va_end(args); \
          if ((buf)[sizeof(buf) - 1] != 0x15) \
            ABORT("GC_printf clobbered stack"); \
        } while (0)
/* This is recommended for production code (release). */
GC_API void GC_CALLBACK GC_ignore_warn_proc(char *msg, GC_word arg)
{
  /* Drop the warning silently unless statistics printing is enabled,   */
  /* in which case forward it to the default warning handler.           */
  if (!GC_print_stats)
    return;
  GC_default_warn_proc(msg, arg);
}
/* NOTE(review): the following stray German text was appended by the
 * source-hosting web page, not by the authors; preserved here as a
 * comment (translated) so the file remains compilable:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting is still experimental."
 */