/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P. * Copyright (c) 2008-2021 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice.
*/
#if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__) \
|| defined(__CYGWIN__)) && !defined(_GNU_SOURCE) /* Can't test LINUX, since this must be defined before other includes. */ # define _GNU_SOURCE 1 #endif
#ifdefined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
&& defined(__i386__) && defined(GC_EXTERN) /* defined in gc.c */ /* See the description in mark.c. */ # define __MINGW_EXCPT_DEFINE_PSDK 1 #endif
# ifdefined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG) /* To turn off assertion checking (in atomic_ops.h). */ # define NDEBUG 1 # endif
#ifndef GC_H # include "../gc.h" #endif
#include <stdlib.h> #if !defined(sony_news) # include <stddef.h> #endif
#ifdef DGUX # include <sys/types.h> # include <sys/time.h> # include <sys/resource.h> #endif/* DGUX */
#ifdef BSD_TIME # include <sys/types.h> # include <sys/time.h> # include <sys/resource.h> #endif/* BSD_TIME */
#ifndef PTR_T_DEFINED typedefchar * ptr_t; /* A generic pointer to which we can add */ /* byte displacements and which can be used */ /* for address comparisons. */ # define PTR_T_DEFINED #endif
#ifndef SIZE_MAX # include <limits.h> #endif #ifdefined(SIZE_MAX) && !defined(CPPCHECK) # define GC_SIZE_MAX ((size_t)SIZE_MAX) /* Extra cast to workaround some buggy SIZE_MAX definitions. */ #else # define GC_SIZE_MAX (~(size_t)0) #endif
#if GC_GNUC_PREREQ(3, 0) && !defined(LINT2) # define EXPECT(expr, outcome) __builtin_expect(expr,outcome) /* Equivalent to (expr), but predict that usually (expr)==outcome. */ #else # define EXPECT(expr, outcome) (expr) #endif/* __GNUC__ */
/* Saturated addition of size_t values. Used to avoid value wrap */ /* around on overflow. The arguments should have no side effects. */ #define SIZET_SAT_ADD(a, b) \
(EXPECT((a) < GC_SIZE_MAX - (b), TRUE) ? (a) + (b) : GC_SIZE_MAX)
#include"gcconfig.h"
#if !defined(GC_ATOMIC_UNCOLLECTABLE) && defined(ATOMIC_UNCOLLECTABLE) /* For compatibility with old-style naming. */ # define GC_ATOMIC_UNCOLLECTABLE #endif
#ifndef GC_INNER /* This tagging macro must be used at the start of every variable */ /* definition which is declared with GC_EXTERN. Should be also used */ /* for the GC-scope function definitions and prototypes. Must not be */ /* used in gcconfig.h. Shouldn't be used for the debugging-only */ /* functions. Currently, not used for the functions declared in or */ /* called from the "dated" source files (located in "extra" folder). */ # ifdefined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
&& !defined(MSWINCE) && !defined(CYGWIN32) # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY) /* See the corresponding GC_API definition. */ # define GC_INNER __attribute__((__visibility__("hidden"))) # else /* The attribute is unsupported. */ # define GC_INNER /* empty */ # endif # else # define GC_INNER /* empty */ # endif
# define GC_EXTERN extern GC_INNER /* Used only for the GC-scope variables (prefixed with "GC_") */ /* declared in the header files. Must not be used for thread-local */ /* variables. Must not be used in gcconfig.h. Shouldn't be used for */ /* the debugging-only or profiling-only variables. Currently, not */ /* used for the variables accessed from the "dated" source files */ /* (specific.c/h, and in the "extra" folder). */ /* The corresponding variable definition must start with GC_INNER. */ #endif/* !GC_INNER */
#ifdef __cplusplus /* Register storage specifier is deprecated in C++11. */ # define REGISTER/* empty */ #else /* Used only for several local variables in the performance-critical */ /* functions. Should not be used for new code. */ # define REGISTERregister #endif
#ifdefined(M68K) && defined(__GNUC__) /* By default, __alignof__(word) is 2 on m68k. Use this attribute to */ /* have proper word alignment (i.e. 4-byte on a 32-bit arch). */ # define GC_ATTR_WORD_ALIGNED __attribute__((__aligned__(sizeof(word)))) #else # define GC_ATTR_WORD_ALIGNED /* empty */ #endif
#ifndef GC_API_OSCALL /* This is used to identify GC routines called by name from OS. */ # ifdefined(__GNUC__) # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY) /* Same as GC_API if GC_DLL. */ # define GC_API_OSCALL extern __attribute__((__visibility__("default"))) # else /* The attribute is unsupported. */ # define GC_API_OSCALL extern # endif # else # define GC_API_OSCALL GC_API # endif #endif
/* #define ALL_INTERIOR_POINTERS */ /* Forces all pointers into the interior of an */ /* object to be considered valid. Also causes the */ /* sizes of all objects to be inflated by at least */ /* one byte. This should suffice to guarantee */ /* that in the presence of a compiler that does */ /* not perform garbage-collector-unsafe */ /* optimizations, all portable, strictly ANSI */ /* conforming C programs should be safely usable */ /* with malloc replaced by GC_malloc and free */ /* calls removed. There are several disadvantages: */ /* 1. There are probably no interesting, portable, */ /* strictly ANSI conforming C programs. */ /* 2. This option makes it hard for the collector */ /* to allocate space that is not "pointed to" */ /* by integers, etc. Under SunOS 4.X with a */ /* statically linked libc, we empirically */ /* observed that it would be difficult to */ /* allocate individual objects > 100 KB. */ /* Even if only smaller objects are allocated, */ /* more swap space is likely to be needed. */ /* Fortunately, much of this will never be */ /* touched. */ /* If you can easily avoid using this option, do. */ /* If not, try to keep individual objects small. */ /* This is now really controlled at startup, */ /* through GC_all_interior_pointers. */
EXTERN_C_BEGIN

#ifndef GC_NO_FINALIZATION
# define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
  GC_INNER void GC_notify_or_invoke_finalizers(void);
                        /* If GC_finalize_on_demand is not set, invoke  */
                        /* eligible finalizers.  Otherwise:             */
                        /* Call *GC_finalizer_notifier if there are     */
                        /* finalizers to be run, and we haven't called  */
                        /* this procedure yet this GC cycle.            */
  GC_INNER void GC_finalize(void);
                        /* Perform all indicated finalization actions   */
                        /* on unmarked objects.                         */
                        /* Unreachable finalizable objects are enqueued */
                        /* for processing by GC_invoke_finalizers.      */
                        /* Invoked with lock.                           */

#if !defined(DONT_ADD_BYTE_AT_END)
# ifdef LINT2
    /* Explicitly instruct the code analysis tool that                  */
    /* GC_all_interior_pointers is assumed to have only 0 or 1 value.   */
#   define EXTRA_BYTES ((size_t)(GC_all_interior_pointers? 1 : 0))
# else
#   define EXTRA_BYTES (size_t)GC_all_interior_pointers
# endif
# define MAX_EXTRA_BYTES 1
#else
# define EXTRA_BYTES 0
# define MAX_EXTRA_BYTES 0
#endif

# ifndef LARGE_CONFIG
#   define MINHINCR 16          /* Minimum heap increment, in blocks of */
                                /* HBLKSIZE.  Must be multiple of the   */
                                /* largest page size.                   */
#   define MAXHINCR 2048        /* Maximum heap increment, in blocks.   */
# else
#   define MINHINCR 64
#   define MAXHINCR 4096
# endif

# define BL_LIMIT GC_black_list_spacing
                        /* If we need a block of N bytes, and we have   */
                        /* a block of N + BL_LIMIT bytes available,     */
                        /* and N > BL_LIMIT,                            */
                        /* but all possible positions in it are         */
                        /* blacklisted, we just use it anyway (and      */
                        /* print a warning, if warnings are enabled).   */
                        /* This risks subsequently leaking the block    */
                        /* due to a false reference.  But not using     */
                        /* the block risks unreasonable immediate       */
                        /* heap growth.                                 */
#ifdef NEED_CALLINFO
  /* A single saved call-stack frame: the caller's pc plus (optionally) */
  /* its first NARGS arguments.  Used by the call-chain saving code.    */
  struct callinfo {
    word ci_pc;                 /* Caller, not callee, pc.      */
# if NARGS > 0
    word ci_arg[NARGS];         /* bit-wise complement to avoid retention */
# endif
# if (NFRAMES * (NARGS + 1)) % 2 == 1
    /* Likely alignment problem. */
    word ci_dummy;
# endif
  };
#endif
#ifdef SAVE_CALL_CHAIN
  /* Fill in the pc and argument information for up to NFRAMES of my    */
  /* callers.  Ignore my frame and my callers frame.                    */
  GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
  /* Output the previously captured call chain (implementation is       */
  /* platform-specific; see the corresponding source file).             */
  GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
#endif
#elifdefined(NINTENDO_SWITCH) \
|| (((defined(LINUX) && defined(__USE_POSIX199309)) \
|| defined(CYGWIN32)) && defined(_POSIX_TIMERS)) # include <time.h> # define HAVE_CLOCK_GETTIME 1 # define CLOCK_TYPE struct timespec # define CLOCK_TYPE_INITIALIZER { 0, 0 } # ifdefined(_POSIX_MONOTONIC_CLOCK) && !defined(NINTENDO_SWITCH) # define GET_TIME(x) \ do { \ if (clock_gettime(CLOCK_MONOTONIC, &x) == -1) \
ABORT("clock_gettime failed"); \
} while (0) # else # define GET_TIME(x) \ do { \ if (clock_gettime(CLOCK_REALTIME, &x) == -1) \
ABORT("clock_gettime failed"); \
} while (0) # endif # define MS_TIME_DIFF(a, b) \ /* a.tv_nsec - b.tv_nsec is in range -1e9 to 1e9 exclusively */ \
((unsignedlong)((a).tv_nsec + (1000000L*1000 - (b).tv_nsec)) / 1000000UL \
+ ((unsignedlong)((a).tv_sec - (b).tv_sec) * 1000UL) - 1000UL) # define NS_FRAC_TIME_DIFF(a, b) \
((unsignedlong)((a).tv_nsec + (1000000L*1000 - (b).tv_nsec)) % 1000000UL)
#else/* !BSD_TIME && !LINUX && !NN_PLATFORM_CTR && !MSWIN32 */ # include <time.h> # ifdefined(FREEBSD) && !defined(CLOCKS_PER_SEC) # include <machine/limits.h> # define CLOCKS_PER_SEC CLK_TCK # endif # if !defined(CLOCKS_PER_SEC) # define CLOCKS_PER_SEC 1000000 /* This is technically a bug in the implementation. */ /* ANSI requires that CLOCKS_PER_SEC be defined. But at least */ /* under SunOS 4.1.1, it isn't. Also note that the combination of */ /* ANSI C and POSIX is incredibly gross here. The type clock_t */ /* is used by both clock() and times(). But on some machines */ /* these use different notions of a clock tick, CLOCKS_PER_SEC */ /* seems to apply only to clock. Hence we use it here. On many */ /* machines, including SunOS, clock actually uses units of */ /* microseconds (which are not really clock ticks). */ # endif # define CLOCK_TYPE clock_t # define GET_TIME(x) (void)(x = clock()) # define MS_TIME_DIFF(a,b) (CLOCKS_PER_SEC % 1000 == 0 ? \
(unsignedlong)((a) - (b)) / (unsignedlong)(CLOCKS_PER_SEC / 1000) \
: ((unsignedlong)((a) - (b)) * 1000) / (unsignedlong)CLOCKS_PER_SEC) /* Avoid using double type since some targets (like ARM) might */ /* require -lm option for double-to-long conversion. */ # define NS_FRAC_TIME_DIFF(a, b) (CLOCKS_PER_SEC <= 1000 ? 0UL \
: (unsignedlong)(CLOCKS_PER_SEC <= (clock_t)1000000UL \
? (((a) - (b)) * ((clock_t)1000000UL / CLOCKS_PER_SEC) % 1000) * 1000 \
: (CLOCKS_PER_SEC <= (clock_t)1000000UL * 1000 \
? ((a) - (b)) * ((clock_t)1000000UL * 1000 / CLOCKS_PER_SEC) \
: (((a) - (b)) * (clock_t)1000000UL * 1000) / CLOCKS_PER_SEC) \
% (clock_t)1000000UL)) #endif/* !BSD_TIME && !MSWIN32 */ # ifndef CLOCK_TYPE_INITIALIZER /* This is used to initialize CLOCK_TYPE variables (to some value) */ /* to avoid "variable might be uninitialized" compiler warnings. */ # define CLOCK_TYPE_INITIALIZER 0 # endif #endif/* !NO_CLOCK */
/* We use bzero and bcopy internally.  They may not be available.       */
# if defined(SPARC) && defined(SUNOS4) \
     || (defined(M68K) && defined(NEXT)) || defined(VAX)
#   define BCOPY_EXISTS
# elif defined(AMIGA) || defined(DARWIN)
#   include <string.h>
#   define BCOPY_EXISTS
# elif defined(MACOS) && defined(POWERPC)
#   include <MacMemory.h>
#   define bcopy(x,y,n) BlockMoveData(x, y, n)
#   define bzero(x,n) BlockZero(x, n)
#   define BCOPY_EXISTS
# endif
/* Abandon ship */
# if defined(SMALL_CONFIG) || defined(PCR)
#   define GC_on_abort(msg) (void)0 /* be silent on abort */
# else
    GC_API_PRIV GC_abort_func GC_on_abort;
# endif
# if defined(CPPCHECK)
#   define ABORT(msg) { GC_on_abort(msg); abort(); }
# elif defined(PCR)
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   if defined(MSWIN_XBOX1) && !defined(DebugBreak)
#     define DebugBreak() __debugbreak()
#   elif defined(MSWINCE) && !defined(DebugBreak) \
         && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
      /* This simplifies linking for WinCE (and, probably, doesn't      */
      /* hurt debugging much); use -DDebugBreak=DebugBreak to override  */
      /* this behavior if really needed.  This is also a workaround for */
      /* x86mingw32ce toolchain (if it is still declaring DebugBreak()  */
      /* instead of defining it as a macro).                            */
#     define DebugBreak() _exit(-1) /* there is no abort() in WinCE */
#   endif
#   if defined(MSWIN32) && (defined(NO_DEBUGGING) || defined(LINT2))
      /* A more user-friendly abort after showing fatal message.        */
#     define ABORT(msg) (GC_on_abort(msg), _exit(-1))
                /* Exit on error without running "at-exit" callbacks.   */
#   elif defined(MSWINCE) && defined(NO_DEBUGGING)
#     define ABORT(msg) (GC_on_abort(msg), ExitProcess(-1))
#   elif defined(MSWIN32) || defined(MSWINCE)
#     if defined(_CrtDbgBreak) && defined(_DEBUG) && defined(_MSC_VER)
#       define ABORT(msg) { GC_on_abort(msg); \
                            _CrtDbgBreak() /* __debugbreak() */; }
#     else
#       define ABORT(msg) { GC_on_abort(msg); DebugBreak(); }
                /* Note that: on a WinCE box, this could be silently    */
                /* ignored (i.e., the program is not aborted);          */
                /* DebugBreak is a statement in some toolchains.        */
#     endif
#   else
#     define ABORT(msg) (GC_on_abort(msg), abort())
#   endif /* !MSWIN32 */
# endif /* !PCR */
/* For abort message with 1-3 arguments.  C_msg and C_fmt should be     */
/* literals.  C_msg should not contain format specifiers.  Arguments    */
/* should match their format specifiers.                                */
#define ABORT_ARG1(C_msg, C_fmt, arg1) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END

#define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END

#define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", \
                                    arg1, arg2, arg3); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END
/* Same as ABORT but does not have 'no-return' attribute.       */
/* ABORT on a dummy condition (which is always true).           */
#define ABORT_RET(msg) \
              if ((signed_word)GC_current_warn_proc == -1) {} else ABORT(msg)

/* Exit abnormally, but without making a mess (e.g. out of memory). */
# ifdef PCR
#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
#   define EXIT() (GC_on_abort(NULL), exit(1 /* EXIT_FAILURE */))
# endif
/* Print warning message, e.g. almost out of memory.    */
/* The argument (if any) format specifier should be:    */
/* "%s", "%p" or "%"WARN_PRIdPTR.                       */
#define WARN(msg, arg) \
    (*GC_current_warn_proc)((/* no const */ char *)("GC Warning: " msg), \
                            (word)(arg))
GC_EXTERN GC_warn_proc GC_current_warn_proc;

/* Print format type macro for decimal signed_word value passed WARN(). */
/* This could be redefined for Win64 or LLP64, but typically should     */
/* not be done as the WARN format string is, possibly, processed on the */
/* client side, so non-standard print type modifiers (like MS "I64d")   */
/* should be avoided here if possible.                                  */
#ifndef WARN_PRIdPTR
  /* Assume sizeof(void *) == sizeof(long) (or a little-endian machine) */
# define WARN_PRIdPTR "ld"
#endif

/* A tagging macro (for a code static analyzer) to indicate that the    */
/* string obtained from an untrusted source (e.g., argv[], getenv) is   */
/* safe to use in a vulnerable operation (e.g., open, exec).            */
#define TRUSTED_STRING(s) (char*)COVERT_DATAFLOW(s)
/* Try to work out the right way to access thread state structure */ /* members. The structure has changed its definition in different */ /* Darwin versions. This now defaults to the (older) names */ /* without __, thus hopefully, not breaking any existing */ /* Makefile.direct builds. */ # if __DARWIN_UNIX03 # define THREAD_FLD_NAME(x) __ ## x # else # define THREAD_FLD_NAME(x) x # endif # ifdefined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE) # define THREAD_FLD(x) ts_32.THREAD_FLD_NAME(x) # else # define THREAD_FLD(x) THREAD_FLD_NAME(x) # endif #endif/* DARWIN */
#include <setjmp.h>
#if __STDC_VERSION__ >= 201112L # include <assert.h> /* for static_assert */ #endif
/* Word/byte conversion helpers, selected by the configured word size.  */
#if CPP_WORDSZ == 32
# define WORDS_TO_BYTES(x) ((x)<<2)
# define BYTES_TO_WORDS(x) ((x)>>2)
# define LOGWL ((word)5)              /* log[2] of CPP_WORDSZ   */
# define modWORDSZ(n) ((n) & 0x1f)    /* n mod size of word     */
# if ALIGNMENT != 4
#   define UNALIGNED_PTRS
# endif
#endif

#if CPP_WORDSZ == 64
# define WORDS_TO_BYTES(x) ((x)<<3)
# define BYTES_TO_WORDS(x) ((x)>>3)
# define LOGWL ((word)6)              /* log[2] of CPP_WORDSZ   */
# define modWORDSZ(n) ((n) & 0x3f)    /* n mod size of word     */
# if ALIGNMENT != 8
#   define UNALIGNED_PTRS
# endif
#endif
/* The first TINY_FREELISTS free lists correspond to the first  */
/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep      */
/* separate free lists for each multiple of GRANULE_BYTES       */
/* up to (TINY_FREELISTS-1) * GRANULE_BYTES.  After that they   */
/* may be spread out further.                                   */

/* Max size objects supported by freelist (larger objects are   */
/* allocated directly with allchblk(), by rounding to the next  */
/* multiple of HBLKSIZE).                                       */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.   */
        /* We do it this way because a compiler should  */
        /* find it hard to use an integer division      */
        /* instead of a shift.  The bundled SunOS 4.1   */
        /* o.w. sometimes pessimizes the subtraction to */
        /* involve a call to .div.                      */

/* Round up allocation size (in bytes) to a multiple of a granule.      */
#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
            (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))

/* Round up byte allocation requests to integral number of words, etc.  */
# define ROUNDED_UP_GRANULES(lb) /* lb should have no side-effect */ \
        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
#   define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), TRUE)
# else
#   define SMALL_OBJ(bytes) \
            (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), TRUE) \
             || (bytes) <= MAXOBJBYTES - EXTRA_BYTES)
        /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.   */
        /* But we try to avoid looking up EXTRA_BYTES.                  */
# endif
# define ADD_SLOP(lb) /* lb should have no side-effect */ \
        SIZET_SAT_ADD(lb, EXTRA_BYTES)
/* Hash table representation of sets of pages.                          */
/* Implements a map from aligned HBLKSIZE chunks of the address space   */
/* to one bit each.                                                     */
/* This assumes it is OK to spuriously set bits, e.g. because multiple  */
/* addresses are represented by a single location.                      */
/* Used by black-listing code, and perhaps by dirty bit maintenance     */
/* code.                                                                */

#ifndef LOG_PHT_ENTRIES
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ == 32
#     define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks,      */
                                /* which is >= 4 GB.  Each table takes  */
                                /* 128 KB, some of which may never be   */
                                /* touched.                             */
#   else
#     define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks,      */
                                /* which is >= 8 GB.  Each table takes  */
                                /* 256 KB, some of which may never be   */
                                /* touched.                             */
#   endif
# elif !defined(SMALL_CONFIG)
#   define LOG_PHT_ENTRIES 18   /* Collisions are likely if heap grows  */
                                /* to more than 256K hblks >= 1 GB.     */
                                /* Each hash table occupies 32 KB.      */
                                /* Even for somewhat smaller heaps,     */
                                /* say half that, collisions may be an  */
                                /* issue because we blacklist           */
                                /* addresses outside the heap.          */
# else
#   define LOG_PHT_ENTRIES 15   /* Collisions are likely if heap grows  */
                                /* to more than 32K hblks (128 MB).     */
                                /* Each hash table occupies 4 KB.       */
# endif
#endif /* !LOG_PHT_ENTRIES */

#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
                /* upper bound                                          */
                /* We allocate 1 bit per allocation granule.            */
                /* If MARK_BIT_PER_GRANULE is defined, we use           */
                /* every nth bit, where n is the number of              */
                /* allocation granules per object.  If                  */
                /* MARK_BIT_PER_OBJ is defined, we only use the         */
                /* initial group of mark bits, and it is safe           */
                /* to allocate smaller header for large objects.        */
/* A single slot that may be accessed as an unsigned word, a signed     */
/* word, a pointer, or (with parallel marking) an atomic word.          */
union word_ptr_ao_u {
  word w;
  signed_word sw;
  void *vp;
# ifdef PARALLEL_MARK
    volatile AO_t ao;
# endif
};
/* We maintain layout maps for heap blocks containing objects of a given */ /* size. Each entry in this map describes a byte offset and has the */ /* following type. */ struct hblkhdr { struct hblk * hb_next; /* Link field for hblk free list */ /* and for lists of chunks waiting to be */ /* reclaimed. */ struct hblk * hb_prev; /* Backwards link for free list. */ struct hblk * hb_block; /* The corresponding block. */ unsignedchar hb_obj_kind; /* Kind of objects in the block. Each kind */ /* identifies a mark procedure and a set of */ /* list headers. Sometimes called regions. */ unsignedchar hb_flags; # define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */ /* point to the first page of */ /* this object. */ # define WAS_UNMAPPED 2 /* This is a free block, which has */ /* been unmapped from the address */ /* space. */ /* GC_remap must be invoked on it */ /* before it can be reallocated. */ /* Only set with USE_MUNMAP. */ # define FREE_BLK 4 /* Block is free, i.e. not in use. */ # ifdef ENABLE_DISCLAIM # define HAS_DISCLAIM 8 /* This kind has a callback on reclaim. */ # define MARK_UNCONDITIONALLY 0x10 /* Mark from all objects, marked or */ /* not. Used to mark objects needed by */ /* reclaim notifier. */ # endif # ifdef MARK_BIT_PER_GRANULE # define LARGE_BLOCK 0x20 # endif unsignedshort hb_last_reclaimed; /* Value of GC_gc_no when block was */ /* last allocated or swept. May wrap. */ /* For a free block, this is maintained */ /* only for USE_MUNMAP, and indicates */ /* when the header was allocated, or */ /* when the size of the block last */ /* changed. */ # ifdef MARK_BIT_PER_OBJ
unsigned32 hb_inv_sz; /* A good upper bound for 2**32/hb_sz. */ /* For large objects, we use */ /* LARGE_INV_SZ. */ # define LARGE_INV_SZ (1 << 16) # endif
word hb_sz; /* If in use, size in bytes, of objects in the block. */ /* if free, the size in bytes of the whole block. */ /* We assume that this is convertible to signed_word */ /* without generating a negative result. We avoid */ /* generating free blocks larger than that. */
word hb_descr; /* object descriptor for marking. See */ /* gc_mark.h. */ # ifdef MARK_BIT_PER_GRANULE unsignedshort * hb_map; /* Essentially a table of remainders */ /* mod BYTES_TO_GRANULES(hb_sz), except */ /* for large blocks. See GC_obj_map. */ # endif # ifdef PARALLEL_MARK volatile AO_t hb_n_marks; /* Number of set mark bits, excluding */ /* the one always set at the end. */ /* Currently it is concurrently */ /* updated and hence only approximate. */ /* But a zero value does guarantee that */ /* the block contains no marked */ /* objects. */ /* Ensuring this property means that we */ /* never decrement it to zero during a */ /* collection, and hence the count may */ /* be one too high. Due to concurrent */ /* updates, an arbitrary number of */ /* increments, but not all of them (!) */ /* may be lost, hence it may in theory */ /* be much too low. */ /* The count may also be too high if */ /* multiple mark threads mark the */ /* same object due to a race. */ # else
size_t hb_n_marks; /* Without parallel marking, the count */ /* is accurate. */ # endif # ifdef USE_MARK_BYTES # define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1) /* Unlike the other case, this is in units of bytes. */ /* Since we force double-word alignment, we need at most one */ /* mark bit per 2 words. But we do allocate and set one */ /* extra mark bit to avoid an explicit check for the */ /* partial object at the end of each block. */ union { char _hb_marks[MARK_BITS_SZ]; /* The i'th byte is 1 if the object */ /* starting at granule i or object i is */ /* marked, 0 o.w. */ /* The mark bit for the "one past the */ /* end" object is always set to avoid a */ /* special case test in the marker. */
word dummy; /* Force word alignment of mark bytes. */
} _mark_byte_union; # define hb_marks _mark_byte_union._hb_marks # else # define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
word hb_marks[MARK_BITS_SZ]; # endif /* !USE_MARK_BYTES */
};
# define ANY_INDEX 23   /* "Random" mark bit index for assertions       */

/* The number of objects in a block dedicated to a certain size.        */
/* may erroneously yield zero (instead of one) for large objects.       */
# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))

# define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
# define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
                                divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))
        /* Size of block (in units of HBLKSIZE) needed to hold objects  */
        /* of given lb (in bytes).  The checked variant prevents wrap   */
        /* around.                                                      */

/* Object free list link */
# define obj_link(p) (*(void **)(p))

/* Root sets.  Logically private to mark_rts.c.  But we don't want the  */
/* tables scanned, so we put them here.                                 */
/* MAX_ROOT_SETS is the maximum number of ranges that can be            */
/* registered as static roots.                                          */
# ifdef LARGE_CONFIG
#   define MAX_ROOT_SETS 8192
# elif !defined(SMALL_CONFIG)
#   define MAX_ROOT_SETS 2048
# else
#   define MAX_ROOT_SETS 512
# endif

# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
        /* Maximum number of segments that can be excluded from root    */
        /* sets.                                                        */
/* Data structure for excluded static roots.                    */
/* e_start and e_end delimit the excluded address range.        */
struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};
/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
struct roots {
    ptr_t r_start;      /* multiple of word size */
    ptr_t r_end;        /* multiple of word size and greater than r_start */
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      struct roots * r_next;    /* Next entry in the same chain of the  */
                                /* hash table mentioned above.          */
#   endif
    GC_bool r_tmp;
                /* Delete before registering new dynamic libraries.     */
};

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  /* Size of hash table index to roots. */
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif
typedefstruct GC_ms_entry {
ptr_t mse_start; /* First word of object, word aligned. */ union word_ptr_ao_u mse_descr; /* Descriptor; low order two bits are tags, */ /* as described in gc_mark.h. */
} mse;
typedef int mark_state_t;       /* Current state of marking.            */
                                /* Used to remember where we are during */
                                /* concurrent marking.                  */
/* Pair of finalization lists: registered finalizable objects plus the  */
/* queue of objects that should be finalized now.                       */
struct fnlz_roots_s {
  struct finalizable_object **fo_head;
  /* List of objects that should be finalized now: */
  struct finalizable_object *finalize_now;
};
/* A toggle-ref entry: either a strong pointer or a hidden (weak) one.  */
union toggle_ref_u {
  /* The lowest bit is used to distinguish between choices. */
  void *strong_ref;
  GC_hidden_pointer weak_ref;
};
/* Extended descriptors. GC_typed_mark_proc understands these. */ /* These are used for simple objects that are larger than what */ /* can be described by a BITMAP_BITS sized bitmap. */ typedefstruct {
word ed_bitmap; /* lsb corresponds to first word. */
GC_bool ed_continued; /* next entry is continuation. */
} typed_ext_descr_t;
/* Lists of all heap blocks and free lists */ /* as well as other random data structures */ /* that should not be scanned by the */ /* collector. */ /* These are grouped together in a struct */ /* so that they can be easily skipped by the */ /* GC_mark routine. */ /* The ordering is weird to make GC_malloc */ /* faster by keeping the important fields */ /* sufficiently close together that a */ /* single load of a base register will do. */ /* Scalars that could easily appear to */ /* be pointers are also put here. */ /* The main fields should precede any */ /* conditionally included fields, so that */ /* gc_inline.h will work even if a different */ /* set of macros is defined when the client is */ /* compiled. */
struct _GC_arrays {
word _heapsize; /* Heap size in bytes (value never goes down). */
word _requested_heapsize; /* Heap size due to explicit expansion. */
ptr_t _last_heap_addr;
word _large_free_bytes; /* Total bytes contained in blocks on large object free */ /* list. */
word _large_allocd_bytes; /* Total number of bytes in allocated large objects blocks. */ /* For the purposes of this counter and the next one only, a */ /* large object is one that occupies a block of at least */ /* 2*HBLKSIZE. */
word _max_large_allocd_bytes; /* Maximum number of bytes that were ever allocated in */ /* large object blocks. This is used to help decide when it */ /* is safe to split up a large block. */
word _bytes_allocd_before_gc; /* Number of bytes allocated before this */ /* collection cycle. */ # define GC_our_mem_bytes GC_arrays._our_mem_bytes
word _our_mem_bytes; # ifndef SEPARATE_GLOBALS # define GC_bytes_allocd GC_arrays._bytes_allocd
word _bytes_allocd; /* Number of bytes allocated during this collection cycle. */ # endif
word _bytes_dropped; /* Number of black-listed bytes dropped during GC cycle */ /* as a result of repeated scanning during allocation */ /* attempts. These are treated largely as allocated, */ /* even though they are not useful to the client. */
word _bytes_finalized; /* Approximate number of bytes in objects (and headers) */ /* that became ready for finalization in the last */ /* collection. */
word _bytes_freed; /* Number of explicitly deallocated bytes of memory */ /* since last collection. */
word _finalizer_bytes_freed; /* Bytes of memory explicitly deallocated while */ /* finalizers were running. Used to approximate memory */ /* explicitly deallocated by finalizers. */
bottom_index *_all_bottom_indices; /* Pointer to the first (lowest address) bottom_index; */ /* assumes the lock is held. */
bottom_index *_all_bottom_indices_end; /* Pointer to the last (highest address) bottom_index; */ /* assumes the lock is held. */
ptr_t _scratch_free_ptr;
hdr *_hdr_free_list;
ptr_t _scratch_end_ptr; /* GC_scratch_end_ptr is end point of the current scratch area. */ # ifdefined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX)) # define USE_SCRATCH_LAST_END_PTR # define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
ptr_t _scratch_last_end_ptr; /* GC_scratch_last_end_ptr is the end point of the last */ /* obtained scratch area. */ /* Used by GC_register_dynamic_libraries(). */ # endif
mse *_mark_stack; /* Limits of stack for GC_mark routine. All ranges */ /* between GC_mark_stack (incl.) and GC_mark_stack_top */ /* (incl.) still need to be marked from. */
mse *_mark_stack_limit; # ifdef PARALLEL_MARK
mse *volatile _mark_stack_top; /* Updated only with mark lock held, but read asynchronously. */ /* TODO: Use union to avoid casts to AO_t */ # else
mse *_mark_stack_top; # endif
word _composite_in_use; /* Number of bytes in the accessible */ /* composite objects. */
word _atomic_in_use; /* Number of bytes in the accessible */ /* atomic objects. */ # ifdef USE_MUNMAP # define GC_unmapped_bytes GC_arrays._unmapped_bytes
word _unmapped_bytes; # ifdef COUNT_UNMAPPED_REGIONS # define GC_num_unmapped_regions GC_arrays._num_unmapped_regions
signed_word _num_unmapped_regions; # endif # else # define GC_unmapped_bytes 0 # endif
bottom_index * _all_nils; # define GC_scan_ptr GC_arrays._scan_ptr struct hblk * _scan_ptr; # ifdef PARALLEL_MARK # define GC_main_local_mark_stack GC_arrays._main_local_mark_stack
mse *_main_local_mark_stack; # define GC_first_nonempty GC_arrays._first_nonempty volatile AO_t _first_nonempty; /* Lowest entry on mark stack that may be */ /* nonempty. Updated only by initiating thread. */ # endif # define GC_mark_stack_size GC_arrays._mark_stack_size
size_t _mark_stack_size; # define GC_mark_state GC_arrays._mark_state
mark_state_t _mark_state; /* Initialized to MS_NONE (0). */ # define GC_mark_stack_too_small GC_arrays._mark_stack_too_small
GC_bool _mark_stack_too_small; /* We need a larger mark stack. May be set by */ /* client supplied mark routines. */ # define GC_objects_are_marked GC_arrays._objects_are_marked
GC_bool _objects_are_marked; /* Are there collectible marked objects in the heap? */ # ifdef ENABLE_TRACE # define GC_trace_addr GC_arrays._trace_addr
ptr_t _trace_addr; # endif # define GC_capacity_heap_sects GC_arrays._capacity_heap_sects
size_t _capacity_heap_sects; # define GC_n_heap_sects GC_arrays._n_heap_sects
word _n_heap_sects; /* Number of separately added heap sections. */ # ifdefined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32) # define GC_n_heap_bases GC_arrays._n_heap_bases
word _n_heap_bases; /* See GC_heap_bases. */ # endif # ifdef USE_PROC_FOR_LIBRARIES # define GC_n_memory GC_arrays._n_memory
word _n_memory; /* Number of GET_MEM allocated memory sections. */ # endif # ifdef GC_GCJ_SUPPORT # define GC_gcjobjfreelist GC_arrays._gcjobjfreelist
ptr_t *_gcjobjfreelist; # endif # define GC_fo_entries GC_arrays._fo_entries
word _fo_entries; # ifndef GC_NO_FINALIZATION # define GC_dl_hashtbl GC_arrays._dl_hashtbl # define GC_fnlz_roots GC_arrays._fnlz_roots # define GC_log_fo_table_size GC_arrays._log_fo_table_size # ifndef GC_LONG_REFS_NOT_NEEDED # define GC_ll_hashtbl GC_arrays._ll_hashtbl struct dl_hashtbl_s _ll_hashtbl; # endif struct dl_hashtbl_s _dl_hashtbl; struct fnlz_roots_s _fnlz_roots; unsigned _log_fo_table_size; # ifndef GC_TOGGLE_REFS_NOT_NEEDED # define GC_toggleref_arr GC_arrays._toggleref_arr # define GC_toggleref_array_size GC_arrays._toggleref_array_size # define GC_toggleref_array_capacity GC_arrays._toggleref_array_capacity union toggle_ref_u *_toggleref_arr;
size_t _toggleref_array_size;
size_t _toggleref_array_capacity; # endif # endif # ifdef TRACE_BUF # define GC_trace_buf_ptr GC_arrays._trace_buf_ptr int _trace_buf_ptr; # endif # ifdef ENABLE_DISCLAIM # define GC_finalized_kind GC_arrays._finalized_kind int _finalized_kind; # endif # define n_root_sets GC_arrays._n_root_sets # define GC_excl_table_entries GC_arrays._excl_table_entries int _n_root_sets; /* GC_static_roots[0..n_root_sets) contains the */ /* valid root sets. */
size_t _excl_table_entries; /* Number of entries in use. */ # ifdef THREADS # define GC_roots_were_cleared GC_arrays._roots_were_cleared
GC_bool _roots_were_cleared; # endif # define GC_explicit_typing_initialized GC_arrays._explicit_typing_initialized # define GC_ed_size GC_arrays._ed_size # define GC_avail_descr GC_arrays._avail_descr # define GC_ext_descriptors GC_arrays._ext_descriptors # ifdef AO_HAVE_load_acquire volatile AO_t _explicit_typing_initialized; # else
GC_bool _explicit_typing_initialized; # endif
size_t _ed_size; /* Current size of above arrays. */
size_t _avail_descr; /* Next available slot. */
typed_ext_descr_t *_ext_descriptors; /* Points to array of extended */ /* descriptors. */
GC_mark_proc _mark_procs[MAX_MARK_PROCS]; /* Table of user-defined mark procedures. There is */ /* a small number of these, which can be referenced */ /* by DS_PROC mark descriptors. See gc_mark.h. */ char _modws_valid_offsets[sizeof(word)]; /* GC_valid_offsets[i] ==> */ /* GC_modws_valid_offsets[i%sizeof(word)] */ # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) # define GC_root_index GC_arrays._root_index struct roots * _root_index[RT_SIZE]; # endif # ifdef SAVE_CALL_CHAIN # define GC_last_stack GC_arrays._last_stack struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection. Useful for */ /* debugging mysterious object disappearances. In the */ /* multi-threaded case, we currently only save the */ /* calling stack. */ # endif # ifndef SEPARATE_GLOBALS # define GC_objfreelist GC_arrays._objfreelist void *_objfreelist[MAXOBJGRANULES+1]; /* free list for objects */ # define GC_aobjfreelist GC_arrays._aobjfreelist void *_aobjfreelist[MAXOBJGRANULES+1]; /* free list for atomic objects */ # endif void *_uobjfreelist[MAXOBJGRANULES+1]; /* Uncollectible but traced objects. */ /* Objects on this and _auobjfreelist */ /* are always marked, except during */ /* garbage collections. */ # ifdef GC_ATOMIC_UNCOLLECTABLE # define GC_auobjfreelist GC_arrays._auobjfreelist void *_auobjfreelist[MAXOBJGRANULES+1]; /* Atomic uncollectible but traced objects. */ # endif
size_t _size_map[MAXOBJBYTES+1]; /* Number of granules to allocate when asked for a certain */ /* number of bytes. Should be accessed with the allocation */ /* lock held. */ # ifdef MARK_BIT_PER_GRANULE # define GC_obj_map GC_arrays._obj_map unsignedshort * _obj_map[MAXOBJGRANULES + 1]; /* If not NULL, then a pointer to a map of valid */ /* object addresses. */ /* _obj_map[sz_in_granules][i] is */ /* i % sz_in_granules. */ /* This is now used purely to replace a */ /* division in the marker by a table lookup. */
/* NOTE(review): the remainder of this file was cut off by the
   extraction tool ("maximum size reached"); the trailing tool/site
   output (German footer text) has been replaced by this comment.
   struct _GC_arrays above is therefore incomplete here -- restore
   the rest of the file from the upstream source before compiling. */