/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. * Copyright (c) 1996 by Silicon Graphics. All rights reserved. * Copyright (c) 2009-2021 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice.
*/ /* An incomplete test for the garbage collector. */ /* Some more obscure entry points are not tested at all. */ /* This must be compiled with the same flags used to build the */ /* GC. It uses GC internals to allow more precise results */ /* checking for some of the tests. */
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifndef NTHREADS /* Number of additional threads to fork. */
# define NTHREADS 5 /* Excludes main thread, which also runs a test. */
        /* In the single-threaded case, the number of times to rerun it. */
        /* Not respected by PCR test. */
#endif

#if defined(mips) && defined(SYSTYPE_BSD43)
  /* MIPS RISCOS 4 lacks a usable <stdlib.h>. */
#else
# include <stdlib.h>
#endif
#include <stdio.h>
#if defined(_WIN32_WCE) && !defined(__GNUC__)
# include <winbase.h>
/* # define assert ASSERT */
#else
# include <assert.h> /* Not normally used, but handy for debugging. */
#endif
/* Call GC_INIT only on platforms on which we think we really need it,  */
/* so that we can test automatic initialization on the rest.            */
#if defined(TEST_EXPLICIT_GC_INIT) || defined(AIX) || defined(CYGWIN32) \
    || defined(DARWIN) || defined(HOST_ANDROID) \
    || (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
# define GC_OPT_INIT GC_INIT()
#else
# define GC_OPT_INIT /* empty */
#endif
/* Warn (but do not abort) if the collector was built in leak-detection */
/* mode; this test frees almost nothing deliberately.                   */
#define INIT_FIND_LEAK \
    if (!GC_get_find_leak()) {} else \
      GC_printf("This test program is not designed for leak detection mode\n")

/* Abort the test on allocation failure; p is evaluated once.           */
#define CHECK_OUT_OF_MEMORY(p) \
    if ((p) == NULL) { \
      GC_printf("Out of memory\n"); \
      exit(1); \
    }
/* Define AO primitives for a single-threaded mode. */ #ifndef AO_HAVE_compiler_barrier /* AO_t not defined. */ # define AO_t GC_word #endif #ifndef AO_HAVE_load_acquire static AO_t AO_load_acquire(constvolatile AO_t *addr)
{
AO_t result;
FINALIZER_LOCK();
result = *addr;
FINALIZER_UNLOCK(); return result;
} #endif #ifndef AO_HAVE_store_release /* Not a macro as new_val argument should be evaluated before the lock. */ staticvoid AO_store_release(volatile AO_t *addr, AO_t new_val)
{
FINALIZER_LOCK();
*addr = new_val;
FINALIZER_UNLOCK();
} #endif #ifndef AO_HAVE_fetch_and_add1 # define AO_fetch_and_add1(p) ((*(p))++) /* This is used only to update counters. */ #endif
/* AT_END may be defined to exercise the interior pointer test */ /* if the collector is configured with ALL_INTERIOR_POINTERS. */ /* As it stands, this test should succeed with either */ /* configuration. In the FIND_LEAK configuration, it should */ /* find lots of leaks, since we free almost nothing. */
/* The following struct emulates the vtable in gcj.                 */
/* This assumes the default value of MARK_DESCR_OFFSET.             */
struct fake_vtable {
  void * dummy;         /* class pointer in real gcj. */
  GC_word descr;        /* mark descriptor word.      */
};
/* To check uncollectible allocation we build lists with disguised cdr */ /* pointers, and make sure they don't go away. */
/* Build a list of the integers low..up out of uncollectable cons   */
/* cells (with disguised cdr pointers), so we can later verify the  */
/* collector never reclaims them.                                   */
sexpr uncollectable_ints(int low, int up)
{
    if (low > up) return(nil);
    return(small_cons_uncollectable(small_cons_leaf(low),
                                    uncollectable_ints(low + 1, up)));
}
/* Verify that list contains exactly the integers low..up, in       */
/* order; report collector breakage otherwise.                      */
void check_ints(sexpr list, int low, int up)
{
    int expected;

    for (expected = low; ; expected++) {
        if (is_nil(list)) {
            GC_printf("list is nil\n");
            FAIL;
        }
        if (SEXPR_TO_INT(car(car(list))) != expected) {
            GC_printf(
              "List reversal produced incorrect list - collector is broken\n");
            FAIL;
        }
        if (expected == up) {
            if (cdr(list) != nil) {
                GC_printf("List too long - collector is broken\n");
                FAIL;
            }
            break;
        }
        list = cdr(list);
    }
}
/* Like check_ints, but traverse the disguised cdr pointers of an   */
/* uncollectable list built by uncollectable_ints.                  */
void check_uncollectable_ints(sexpr list, int low, int up)
{
    int expected;

    for (expected = low; ; expected++) {
        if (SEXPR_TO_INT(car(car(list))) != expected) {
            GC_printf("Uncollectable list corrupted - collector is broken\n");
            FAIL;
        }
        if (expected == up) {
            if (UNCOLLECTABLE_CDR(list) != nil) {
                GC_printf("Uncollectable list too long - collector is broken\n");
                FAIL;
            }
            break;
        }
        list = UNCOLLECTABLE_CDR(list);
    }
}
/* Not used, but useful for debugging: print an integer list as     */
/* "(1), (2), ..." followed by a newline, or "NIL" if empty.        */
void print_int_list(sexpr x)
{
    if (is_nil(x)) {
        GC_printf("NIL\n");
        return;
    }
    for (;;) {
        GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
        if (is_nil(cdr(x))) {
            GC_printf("\n");
            break;
        }
        GC_printf(", ");
        x = cdr(x);
    }
}
if (size != GC_size(p)) {
GC_printf("GC_get_kind_and_size returned size not matching GC_size\n");
FAIL;
}
p2 = GC_GENERIC_OR_SPECIAL_MALLOC(10, kind);
CHECK_OUT_OF_MEMORY(p2); if (GC_get_kind_and_size(p2, NULL) != kind) {
GC_printf("GC_generic_or_special_malloc:" " unexpected kind of returned object\n");
FAIL;
}
GC_FREE(p2);
}
/* Try to force a to be strangely aligned */ volatilestruct A_s { char dummy;
AO_t aa;
} A; #define a_set(p) AO_store_release(&A.aa, (AO_t)(p)) #define a_get() (sexpr)AO_load_acquire(&A.aa)
/*
 * Repeatedly reverse lists built out of very different sized cons cells.
 * Check that we didn't lose anything.
 */
void *GC_CALLBACK reverse_test_inner(void *data)
{
    int i;
    sexpr b;
    sexpr c;
    sexpr d;
    sexpr e;
    sexpr *f, *g, *h;

    if (data == 0) {
      /* This stack frame is not guaranteed to be scanned. */
      return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
    }

#   ifndef BIG
#     if defined(MACOS) \
         || (defined(UNIX_LIKE) && defined(NO_GETCONTEXT)) /* e.g. musl */
        /* Assume 128 KB stacks at least. */
#       if defined(__s390x__)
#         define BIG 600
#       else
#         define BIG 1000
#       endif
#     elif defined(PCR)
        /* PCR default stack is 100 KB.  Stack frames are up to 120 bytes. */
#       define BIG 700
#     elif defined(MSWINCE) || defined(RTEMS)
        /* WinCE only allows 64 KB stacks. */
#       define BIG 500
#     elif defined(EMSCRIPTEN) || defined(OSF1)
        /* Wasm reports "Maximum call stack size exceeded" error otherwise. */
        /* OSF has limited stack space by default, and large frames. */
#       define BIG 200
#     elif defined(__MACH__) && defined(__ppc64__)
#       define BIG 2500
#     else
#       define BIG 4500
#     endif
#   endif

    a_set(ints(1, 49));
    b = ints(1, 50);
    c = ints(1, BIG);
    d = uncollectable_ints(1, 100);
    test_generic_malloc_or_special(d);
    e = uncollectable_ints(1, 1);
    /* Check that realloc updates object descriptors correctly. */
    AO_fetch_and_add1(&collectable_count);
    f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
    f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(f);
    AO_fetch_and_add1(&realloc_count);
    GC_PTR_STORE_AND_DIRTY(f + 5, ints(1, 17));
    AO_fetch_and_add1(&collectable_count);
    g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
    test_generic_malloc_or_special(g);
    g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(g);
    AO_fetch_and_add1(&realloc_count);
    GC_PTR_STORE_AND_DIRTY(g + 799, ints(1, 18));
    AO_fetch_and_add1(&collectable_count);
    h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
    h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
    CHECK_OUT_OF_MEMORY(h);
    AO_fetch_and_add1(&realloc_count);
#   ifdef GC_GCJ_SUPPORT
      GC_PTR_STORE_AND_DIRTY(h + 1999, gcj_ints(1, 200));
      for (i = 0; i < 51; ++i) {
        GC_PTR_STORE_AND_DIRTY(h + 1999, gcj_reverse(h[1999]));
      }
      /* Leave it as the reversed list for now. */
#   else
      GC_PTR_STORE_AND_DIRTY(h + 1999, ints(1, 200));
#   endif
    /* Try to force some collections and reuse of small list elements. */
    for (i = 0; i < 10; i++) {
      (void)ints(1, BIG);
    }
    /* Superficially test interior pointer recognition on stack. */
    c = (sexpr)((char *)c + sizeof(char *));
    d = (sexpr)((char *)d + sizeof(char *));
    GC_FREE((void *)e);

    check_ints(b, 1, 50);
#   ifndef EMSCRIPTEN
      check_ints(a_get(), 1, 49);
#   else
      /* FIXME: gctest fails unless check_ints(a_get(), ...) are skipped. */
#   endif
    for (i = 0; i < 50; i++) {
      check_ints(b, 1, 50);
      b = reverse(reverse(b));
    }
    check_ints(b, 1, 50);
#   ifndef EMSCRIPTEN
      check_ints(a_get(), 1, 49);
#   endif
    for (i = 0; i < 10 * (NTHREADS+1); i++) {
#     if (defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)) \
         && (NTHREADS > 0)
        if (i % 10 == 0) fork_a_thread();
#     endif
      /* This maintains the invariant that a always points to a list */
      /* of 49 integers.  Thus, this is thread safe without locks,   */
      /* assuming acquire/release barriers in a_get/set() and atomic */
      /* pointer assignments (otherwise, e.g., check_ints() may see  */
      /* an uninitialized object returned by GC_MALLOC).             */
      a_set(reverse(reverse(a_get())));
#     if !defined(AT_END) && !defined(THREADS)
        /* This is not thread safe, since realloc explicitly deallocates. */
        a_set(GC_REALLOC(a_get(), (i & 1) != 0 ? 500 : 8200));
        AO_fetch_and_add1(&realloc_count);
#     endif
    }
#   ifndef EMSCRIPTEN
      check_ints(a_get(), 1, 49);
#   endif
    check_ints(b, 1, 50);

    /* Restore c and d values. */
    c = (sexpr)((char *)c - sizeof(char *));
    d = (sexpr)((char *)d - sizeof(char *));
    /* NOTE(review): the extracted source lost the function epilogue; */
    /* a return value is required since the callback returns void*.   */
    return 0;
}
/* Driver for the list-reversal test; also exercises       */
/* GC_do_blocking and (via the inner callback)             */
/* GC_call_with_gc_active.                                 */
void reverse_test(void)
{
    (void)GC_do_blocking(reverse_test_inner, 0);
}
/*
 * The rest of this builds balanced binary trees, checks that they don't
 * disappear, and tests finalization.
 */
typedef struct treenode {
    int level;
    struct treenode * lchild;
    struct treenode * rchild;
} tn;
#ifndef GC_NO_FINALIZATION
  /* Number of objects registered for finalization. */
  int finalizable_count = 0;
#endif
/* Number of objects actually finalized so far. */
int finalized_count = 0;
/* Set when a tree node may have been legitimately dropped,     */
/* suppressing the premature-finalization check.                */
int dropped_something = 0;
/* Set these global variables just once to avoid TSan false positives. */
void set_print_procs(void)
{
    A.dummy = 17;
    GC_is_valid_displacement_print_proc = fail_proc1;
    GC_is_visible_print_proc = fail_proc1;
}
/* Check that p and all the following (NULL-terminated) variadic    */
/* pointer arguments are pairwise distinct; duplicates indicate     */
/* that some function arguments were not marked (e.g. a bad         */
/* GC_push_regs configuration let an argument's object be reused).  */
static void uniq(void *p, ...) {
    va_list a;
    void *q[100];
    int n = 0, i, j;

    q[n++] = p;
    va_start(a, p);
    for (; (q[n] = va_arg(a, void *)) != NULL; n++);
    va_end(a);
    for (i = 0; i < n; i++)
      for (j = 0; j < i; j++)
        if (q[i] == q[j]) {
          GC_printf(
                "Apparently failed to mark from some function arguments.\n"
                "Perhaps GC_push_regs was configured incorrectly?\n"
          );
          FAIL;
        }
}
GC_FREE(0); # ifdef THREADS if (!GC_thread_is_registered() && GC_is_init_called()) {
GC_printf("Current thread is not registered with GC\n");
FAIL;
} # endif
test_tinyfl(); # ifndef DBG_HDRS_ALL
AO_fetch_and_add1(&collectable_count); /* 1 */
AO_fetch_and_add1(&collectable_count); /* 2 */
AO_fetch_and_add1(&collectable_count); /* 3 */ if ((GC_size(GC_malloc(7)) != 8 &&
GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
|| GC_size(GC_malloc(15)) != 16) {
GC_printf("GC_size produced unexpected results\n");
FAIL;
}
AO_fetch_and_add1(&collectable_count); if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
GC_printf("GC_malloc(0) failed: GC_size returns %lu\n",
(unsignedlong)GC_size(GC_malloc(0)));
FAIL;
}
AO_fetch_and_add1(&uncollectable_count); if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
GC_printf("GC_malloc_uncollectable(0) failed\n");
FAIL;
}
AO_fetch_and_add1(&collectable_count);
x = (char*)GC_malloc(16); if (GC_base(GC_PTR_ADD(x, 13)) != x) {
GC_printf("GC_base(heap ptr) produced incorrect result\n");
FAIL;
} if (!GC_is_heap_ptr(x)) {
GC_printf("GC_is_heap_ptr(heap_ptr) produced incorrect result\n");
FAIL;
} if (GC_is_heap_ptr(&x)) {
GC_printf("GC_is_heap_ptr(&local_var) produced incorrect result\n");
FAIL;
} if (GC_is_heap_ptr((void *)&fail_count) || GC_is_heap_ptr(NULL)) {
GC_printf("GC_is_heap_ptr(&global_var) produced incorrect result\n");
FAIL;
}
(void)GC_PRE_INCR(x, 0);
(void)GC_POST_INCR(x);
(void)GC_POST_DECR(x); if (GC_base(x) != x) {
GC_printf("Bad INCR/DECR result\n");
FAIL;
} # ifndef PCR if (GC_base(y) != 0) {
GC_printf("GC_base(fn_ptr) produced incorrect result\n");
FAIL;
} # endif if (GC_same_obj(x+5, x) != x + 5) {
GC_printf("GC_same_obj produced incorrect result\n");
FAIL;
} if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
GC_printf("GC_is_visible produced incorrect result\n");
FAIL;
}
z = (char**)GC_malloc(8);
CHECK_OUT_OF_MEMORY(z);
AO_fetch_and_add1(&collectable_count);
GC_PTR_STORE(z, x);
GC_end_stubborn_change(z); if (*z != x) {
GC_printf("GC_PTR_STORE failed: %p != %p\n", (void *)(*z), (void *)x);
FAIL;
} # if !defined(IA64) && !defined(POWERPC) if (!TEST_FAIL_COUNT(1)) { /* On POWERPCs function pointers point to a descriptor in the */ /* data segment, so there should have been no failures. */ /* The same applies to IA64. */
GC_printf("GC_is_visible produced wrong failure indication\n");
FAIL;
} # endif if (GC_is_valid_displacement(y) != y
|| GC_is_valid_displacement(x) != x
|| GC_is_valid_displacement(x + 3) != x + 3) {
GC_printf("GC_is_valid_displacement produced incorrect result\n");
FAIL;
}
{
size_t i;
(void)GC_malloc(17);
AO_fetch_and_add1(&collectable_count); for (i = sizeof(GC_word); i < 512; i *= 2) {
GC_word result = (GC_word) GC_memalign(i, 17); if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
}
} # ifndef ALL_INTERIOR_POINTERS # ifdefined(POWERPC) if (!TEST_FAIL_COUNT(1)) # else if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2)) # endif
{
GC_printf( "GC_is_valid_displacement produced wrong failure indication\n");
FAIL;
} # endif # endif /* DBG_HDRS_ALL */ /* Test floating point alignment */
{ double *dp = GC_NEW(double);
CHECK_OUT_OF_MEMORY(dp);
AO_fetch_and_add1(&collectable_count);
*dp = 1.0;
dp = GC_NEW(double);
CHECK_OUT_OF_MEMORY(dp);
AO_fetch_and_add1(&collectable_count);
*dp = 1.0;
} /* Test size 0 allocation a bit more */
{
size_t i; for (i = 0; i < 10000; ++i) {
(void)GC_MALLOC(0);
AO_fetch_and_add1(&collectable_count);
GC_FREE(GC_MALLOC(0));
(void)GC_MALLOC_ATOMIC(0);
AO_fetch_and_add1(&atomic_count);
GC_FREE(GC_MALLOC_ATOMIC(0));
test_generic_malloc_or_special(GC_malloc_atomic(1));
AO_fetch_and_add1(&atomic_count);
GC_FREE(GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(1));
GC_FREE(GC_MALLOC_IGNORE_OFF_PAGE(2));
}
}
thr_hndl_sb.gc_thread_handle = GC_get_my_stackbottom(&thr_hndl_sb.sb); # ifdef GC_GCJ_SUPPORT
GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc); # endif /* Make sure that fn arguments are visible to the collector. */
uniq(
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
(void *)0); /* GC_malloc(0) must return NULL or something we can deallocate. */
GC_free(GC_malloc(0));
GC_free(GC_malloc_atomic(0));
GC_free(GC_malloc(0));
GC_free(GC_malloc_atomic(0)); # ifndef NO_TEST_HANDLE_FORK
GC_atfork_prepare();
pid = fork(); if (pid != 0) {
GC_atfork_parent(); if (pid == -1) {
GC_printf("Process fork failed\n");
FAIL;
} if (print_stats)
GC_log_printf("Forked child process, pid= %ld\n", (long)pid); if (waitpid(pid, &wstatus, 0) == -1) {
GC_printf("Wait for child process failed\n");
FAIL;
} if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
GC_printf("Child process failed, pid= %ld, status= 0x%x\n",
(long)pid, wstatus);
FAIL;
}
} else {
pid_t child_pid = getpid();
GC_atfork_child(); if (print_stats)
GC_log_printf("Started a child process, pid= %ld\n",
(long)child_pid); # ifdef PARALLEL_MARK
GC_gcollect(); /* no parallel markers */ # endif
GC_start_mark_threads();
GC_gcollect(); # ifdef THREADS /* Skip "Premature finalization" check in the */ /* child process because there could be a chance */ /* that some other thread of the parent was */ /* executing mktree at the moment of fork. */
dropped_something = 1; # endif
tree_test(); # if !defined(DBG_HDRS_ALL) && !defined(NO_TYPED_TEST)
typed_test(); # endif # ifdef THREADS if (print_stats)
GC_log_printf("Starting tiny reverse test, pid= %ld\n",
(long)child_pid);
tiny_reverse_test(0);
GC_gcollect(); # endif if (print_stats)
GC_log_printf("Finished a child process, pid= %ld\n",
(long)child_pid); exit(0);
} # endif
(void)GC_call_with_alloc_lock(set_stackbottom, &thr_hndl_sb);
/* Repeated list reversal test. */ # ifndef NO_CLOCK
GET_TIME(start_time); # endif
reverse_test(); # ifndef NO_CLOCK if (print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
GC_log_printf("Finished reverse_test at time %u (%p)\n",
(unsigned) time_diff, (void *)&start_time);
} # endif # if !defined(DBG_HDRS_ALL) && !defined(NO_TYPED_TEST)
typed_test(); # ifndef NO_CLOCK if (print_stats) {
CLOCK_TYPE typed_time;
GET_TIME(tree_time);
time_diff = MS_TIME_DIFF(tree_time, start_time);
GC_log_printf("Finished tree_test at time %u (%p)\n",
(unsigned) time_diff, (void *)&start_time);
} # endif /* Run reverse_test a second time, so we hopefully notice corruption. */
reverse_test(); # ifndef NO_CLOCK if (print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
GC_log_printf("Finished second reverse_test at time %u (%p)\n",
(unsigned)time_diff, (void *)&start_time);
} # endif /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */ /* AO_fetch_and_add1() may be unavailable to update a counter. */
(void)GC_call_with_alloc_lock(inc_int_counter, &n_tests); # ifndef NO_CLOCK if (print_stats)
GC_log_printf("Finished %p\n", (void *)&start_time); # endif
}
/* Execute some tests after termination of other test threads (if any). */
void run_single_threaded_test(void) {
    /* Exercise malloc/free while collections are disabled. */
    GC_disable();
    GC_FREE(GC_MALLOC(100));
    GC_enable();
}
/* GC_enumerate_reachable_objects_inner callback: sanity-check the  */
/* (obj, size) pair supplied by the enumerator and bump the counter */
/* pointed to by pcounter.                                          */
void GC_CALLBACK reachable_objs_counter(void *obj, size_t size, void *pcounter)
{
    if (0 == size) {
        GC_printf("Reachable object has zero size\n");
        FAIL;
    }
    if (GC_base(obj) != obj) {
        /* The enumerator must pass object base addresses only. */
        GC_printf("Invalid reachable object base passed by enumerator: %p\n",
                  obj);
        FAIL;
    }
    if (GC_size(obj) != size) {
        GC_printf("Invalid reachable object size passed by enumerator: %lu\n",
                  (unsigned long)size);
        FAIL;
    }
    (*(unsigned *)pcounter)++;
}