/* * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved. * Copyright (C) 2007 Free Software Foundation, Inc * Copyright (c) 2008-2020 Ivan Maidanski * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice.
*/
#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
# include "javaxfc.h" /* to get GC_finalize_all() as extern "C" */
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                        */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);

/* Threshold of log_size to initiate full collection before growing    */
/* a hash table.                                                       */
#ifndef GC_ON_GROW_LOG_SIZE_MIN
# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
#endif
/* Double the size of a hash table. *log_size_ptr is the log of its */ /* current size. May be a no-op. */ /* *table is a pointer to an array of hash headers. If we succeed, we */ /* update both *table and *log_size_ptr. Lock is held. */ STATICvoid GC_grow_table(struct hash_chain_entry ***table, unsigned *log_size_ptr, word *entries_ptr)
{
word i; struct hash_chain_entry *p; unsigned log_old_size = *log_size_ptr; unsigned log_new_size = log_old_size + 1;
word old_size = *table == NULL ? 0 : (word)1 << log_old_size;
word new_size = (word)1 << log_new_size; /* FIXME: Power of 2 size often gets rounded up to one more page. */ struct hash_chain_entry **new_table;
GC_ASSERT(I_HOLD_LOCK()); /* Avoid growing the table in case of at least 25% of entries can */ /* be deleted by enforcing a collection. Ignored for small tables. */ /* In incremental mode we skip this optimization, as we want to */ /* avoid triggering a full GC whenever possible. */ if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
IF_CANCEL(int cancel_state;)
new_table = (struct hash_chain_entry **)
GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
(size_t)new_size * sizeof(struct hash_chain_entry *),
NORMAL); if (new_table == 0) { if (*table == 0) {
ABORT("Insufficient space for initial table allocation");
} else { return;
}
} for (i = 0; i < old_size; i++) {
p = (*table)[i]; while (p != 0) {
ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key); struct hash_chain_entry *next = p -> next;
size_t new_hash = HASH3(real_key, new_size, log_new_size);
p -> next = new_table[new_hash];
GC_dirty(p);
new_table[new_hash] = p;
p = next;
}
}
*log_size_ptr = log_new_size;
*table = new_table;
GC_dirty(new_table); /* entire object */
}
/* Register a disappearing link for *link; the base address of the      */
/* object containing link is computed via GC_base.                      */
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
    ptr_t base = (ptr_t)GC_base(link);

    if (0 == base)
        ABORT("Bad arg to GC_register_disappearing_link");
    return GC_general_register_disappearing_link(link, base);
}
/* Mark everything transitively reachable from p, then mark p itself.   */
static void push_and_mark_object(void *p)
{
    GC_normal_finalize_mark_proc((ptr_t)p);
    /* Drain the mark stack filled by the call above. */
    while (!GC_mark_stack_empty()) {
        MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
        /* Marking did not complete (presumably a mark-stack overflow); */
        /* let GC_mark_some finish the job.                             */
        while (!GC_mark_some(0)) {
            /* Empty. */
        }
    }
}
STATICvoid GC_mark_togglerefs(void)
{
size_t i; if (NULL == GC_toggleref_arr) return;
GC_set_mark_bit(GC_toggleref_arr); for (i = 0; i < GC_toggleref_array_size; ++i) { void *obj = GC_toggleref_arr[i].strong_ref; if (obj != NULL && ((word)obj & 1) == 0) {
push_and_mark_object(obj);
}
}
}
STATICvoid GC_clear_togglerefs(void)
{
size_t i; for (i = 0; i < GC_toggleref_array_size; ++i) { if ((GC_toggleref_arr[i].weak_ref & 1) != 0) { if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
GC_toggleref_arr[i].weak_ref = 0;
} else { /* No need to copy, BDWGC is a non-moving collector. */
}
}
}
}
/* Possible finalization_marker procedures. Note that mark stack */ /* overflow is handled by the caller, and is not a disaster. */ #ifdefined(_MSC_VER) && defined(I386)
GC_ATTR_NOINLINE /* Otherwise some optimizer bug is tickled in VC for X86 (v19, at least). */ #endif STATICvoid GC_normal_finalize_mark_proc(ptr_t p)
{
GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
GC_mark_stack + GC_mark_stack_size);
}
/* This only pays very partial attention to the mark descriptor. */ /* It does the right thing for normal and atomic objects, and treats */ /* most others as normal. */ STATICvoid GC_ignore_self_finalize_mark_proc(ptr_t p)
{
hdr * hhdr = HDR(p);
word descr = hhdr -> hb_descr;
ptr_t q;
ptr_t scan_limit;
ptr_t target_limit = p + hhdr -> hb_sz - 1;
if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
scan_limit = p + descr - sizeof(word);
} else {
scan_limit = target_limit + 1 - sizeof(word);
} for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
word r = *(word *)q;
if (r < (word)p || r > (word)target_limit) {
GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
}
}
}
STATICvoid GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}
/* Possible finalization_marker procedures. Note that mark stack */ /* overflow is handled by the caller, and is not a disaster. */
/* GC_unreachable_finalize_mark_proc is an alias for normal marking, */ /* but it is explicitly tested for, and triggers different */ /* behavior. Objects registered in this way are not finalized */ /* if they are reachable by other finalizable objects, even if those */ /* other objects specify no ordering. */ STATICvoid GC_unreachable_finalize_mark_proc(ptr_t p)
{
GC_normal_finalize_mark_proc(p);
}
/* Register a finalization function. See gc.h for details. */ /* The last parameter is a procedure that determines */ /* marking for finalization ordering. Any objects marked */ /* by that procedure will be guaranteed to not have been */ /* finalized when this finalizer is invoked. */ STATICvoid GC_register_finalizer_inner(void * obj,
GC_finalization_proc fn, void *cd,
GC_finalization_proc *ofn, void **ocd,
finalization_mark_proc mp)
{ struct finalizable_object * curr_fo;
size_t index; struct finalizable_object *new_fo = 0;
hdr *hhdr = NULL; /* initialized to prevent warning. */
DCL_LOCK_STATE;
if (EXPECT(GC_find_leak, FALSE)) { /* No-op. *ocd and *ofn remain unchanged. */ return;
}
LOCK(); if (EXPECT(NULL == GC_fnlz_roots.fo_head, FALSE)
|| EXPECT(GC_fo_entries > ((word)1 << GC_log_fo_table_size), FALSE)) {
GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
&GC_log_fo_table_size, &GC_fo_entries);
GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
1U << GC_log_fo_table_size);
} /* in the THREADS case we hold allocation lock. */ for (;;) { struct finalizable_object *prev_fo = NULL;
GC_oom_func oom_fn;
index = HASH2(obj, GC_log_fo_table_size);
curr_fo = GC_fnlz_roots.fo_head[index]; while (curr_fo != 0) {
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object)); if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) { /* Interruption by a signal in the middle of this */ /* should be safe. The client may see only *ocd */ /* updated, but we'll declare that to be his problem. */ if (ocd) *ocd = (void *) (curr_fo -> fo_client_data); if (ofn) *ofn = curr_fo -> fo_fn; /* Delete the structure for obj. */ if (prev_fo == 0) {
GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
} else {
fo_set_next(prev_fo, fo_next(curr_fo));
GC_dirty(prev_fo);
} if (fn == 0) {
GC_fo_entries--; /* May not happen if we get a signal. But a high */ /* estimate will only make the table larger than */ /* necessary. */ # if !defined(THREADS) && !defined(DBG_HDRS_ALL)
GC_free((void *)curr_fo); # endif
} else {
curr_fo -> fo_fn = fn;
curr_fo -> fo_client_data = (ptr_t)cd;
curr_fo -> fo_mark_proc = mp;
GC_dirty(curr_fo); /* Reinsert it. We deleted it first to maintain */ /* consistency in the event of a signal. */ if (prev_fo == 0) {
GC_fnlz_roots.fo_head[index] = curr_fo;
} else {
fo_set_next(prev_fo, curr_fo);
GC_dirty(prev_fo);
}
} if (NULL == prev_fo)
GC_dirty(GC_fnlz_roots.fo_head + index);
UNLOCK(); # ifndef DBG_HDRS_ALL /* Free unused new_fo returned by GC_oom_fn() */
GC_free((void *)new_fo); # endif return;
}
prev_fo = curr_fo;
curr_fo = fo_next(curr_fo);
} if (EXPECT(new_fo != 0, FALSE)) { /* new_fo is returned by GC_oom_fn(). */
GC_ASSERT(fn != 0); # ifdef LINT2 if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner"); # endif break;
} if (fn == 0) { if (ocd) *ocd = 0; if (ofn) *ofn = 0;
UNLOCK(); return;
}
GET_HDR(obj, hhdr); if (EXPECT(0 == hhdr, FALSE)) { /* We won't collect it, hence finalizer wouldn't be run. */ if (ocd) *ocd = 0; if (ofn) *ofn = 0;
UNLOCK(); return;
}
new_fo = (struct finalizable_object *)
GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL); if (EXPECT(new_fo != 0, TRUE)) break;
oom_fn = GC_oom_fn;
UNLOCK();
new_fo = (struct finalizable_object *)
(*oom_fn)(sizeof(struct finalizable_object)); if (0 == new_fo) { /* No enough memory. *ocd and *ofn remain unchanged. */ return;
} /* It's not likely we'll make it here, but ... */
LOCK(); /* Recalculate index since the table may grow and */ /* check again that our finalizer is not in the table. */
}
GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object)); if (ocd) *ocd = 0; if (ofn) *ofn = 0;
new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
new_fo -> fo_fn = fn;
new_fo -> fo_client_data = (ptr_t)cd;
new_fo -> fo_object_size = hhdr -> hb_sz;
new_fo -> fo_mark_proc = mp;
fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
GC_dirty(new_fo);
GC_fo_entries++;
GC_fnlz_roots.fo_head[index] = new_fo;
GC_dirty(GC_fnlz_roots.fo_head + index);
UNLOCK();
}
#ifndef SMALL_CONFIG STATIC word GC_old_dl_entries = 0; /* for stats printing */ # ifndef GC_LONG_REFS_NOT_NEEDED STATIC word GC_old_ll_entries = 0; # endif #endif/* !SMALL_CONFIG */
#ifndef THREADS /* Global variables to minimize the level of recursion when a client */ /* finalizer allocates memory. */ STATICint GC_finalizer_nested = 0; /* Only the lowest byte is used, the rest is */ /* padding for proper global data alignment */ /* required for some compilers (like Watcom). */ STATICunsigned GC_finalizer_skipped = 0;
/* Checks and updates the level of finalizers recursion. */ /* Returns NULL if GC_invoke_finalizers() should not be called by the */ /* collector (to minimize the risk of a deep finalizers recursion), */ /* otherwise returns a pointer to GC_finalizer_nested. */ STATICunsignedchar *GC_check_finalizer_nested(void)
{ unsigned nesting_level = *(unsignedchar *)&GC_finalizer_nested; if (nesting_level) { /* We are inside another GC_invoke_finalizers(). */ /* Skip some implicitly-called GC_invoke_finalizers() */ /* depending on the nesting (recursion) level. */ if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
GC_finalizer_skipped = 0;
}
*(char *)&GC_finalizer_nested = (char)(nesting_level + 1); return (unsignedchar *)&GC_finalizer_nested;
} #endif/* THREADS */
/* Called with held lock (but the world is running). */ /* Cause disappearing links to disappear and unreachable objects to be */ /* enqueued for finalization. */
GC_INNER void GC_finalize(void)
{ struct finalizable_object * curr_fo, * prev_fo, * next_fo;
ptr_t real_ptr;
size_t i;
size_t fo_size = GC_fnlz_roots.fo_head == NULL ? 0 :
(size_t)1 << GC_log_fo_table_size;
GC_bool needs_barrier = FALSE;
GC_ASSERT(I_HOLD_LOCK()); # ifndef SMALL_CONFIG /* Save current GC_[dl/ll]_entries value for stats printing */
GC_old_dl_entries = GC_dl_hashtbl.entries; # ifndef GC_LONG_REFS_NOT_NEEDED
GC_old_ll_entries = GC_ll_hashtbl.entries; # endif # endif
/* NOTE(review): this function appears TRUNCATED at this point.  The    */
/* declarations above (curr_fo, prev_fo, next_fo, real_ptr, i, fo_size, */
/* needs_barrier) are used below without any visible loop that          */
/* initializes them, and the closing braces that follow do not balance  */
/* the ones opened above.  The missing middle presumably contains the   */
/* passes over the disappearing-link and finalizable-object hash tables */
/* (marking, unregistering, and the enqueue loop) — restore it from the */
/* upstream bdwgc finalize.c before attempting to build this file.      */
/* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
GC_dirty(curr_fo);
SET_FINALIZE_NOW(curr_fo);
/* unhide object pointer so any future collections will */ /* see it. */
curr_fo -> fo_hidden_base =
(word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
GC_bytes_finalized +=
curr_fo -> fo_object_size + sizeof(struct finalizable_object);
curr_fo = next_fo;
}
}
GC_fo_entries = 0; /* all entries deleted from the hash table */
}
/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_CALL GC_finalize_all(void)
{
    DCL_LOCK_STATE;

    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good   */
      /* idea when we should be notifying another thread to run them.   */
      /* But otherwise we don't have a great way to wait for them to    */
      /* run.                                                           */
      LOCK();
    }
    UNLOCK();
}
#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of "safe state" and     */
/* getting into that safe state is expensive.)                          */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
# ifdef AO_HAVE_load
    /* Atomic read of the queue head (updates happen under lock). */
    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
# else
    return GC_fnlz_roots.finalize_now != NULL;
# endif
}
/* Invoke finalizers for all objects that are ready to be finalized.    */
/* Should be called without allocation lock.                            */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_should_invoke_finalizers()) {
      struct finalizable_object *curr_fo;

#     ifdef THREADS
        LOCK();
#     endif
      if (count == 0) {
        bytes_freed_before = GC_bytes_freed;
        /* Don't do this outside, since we need the lock. */
      }
      /* Pop the head of the finalize_now queue. */
      curr_fo = GC_fnlz_roots.finalize_now;
#     ifdef THREADS
        if (curr_fo != NULL)
          SET_FINALIZE_NOW(fo_next(curr_fo));
        UNLOCK();
        if (curr_fo == 0) break;
#     else
        GC_fnlz_roots.finalize_now = fo_next(curr_fo);
#     endif
      fo_set_next(curr_fo, 0);
      /* Run the client finalizer (without the lock held). */
      (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                            curr_fo -> fo_client_data);
      curr_fo -> fo_client_data = 0;
      ++count;
      /* Explicit freeing of curr_fo is probably a bad idea.    */
      /* It throws off accounting if nearly all objects are     */
      /* finalizable.  Otherwise it should not matter.          */
    }
    /* bytes_freed_before is initialized whenever count != 0.   */
    if (count != 0
#       if defined(THREADS) && !defined(THREAD_SANITIZER)
          /* A quick check whether some memory was freed.       */
          /* The race with GC_free() is safe to be ignored      */
          /* because we only need to know if the current        */
          /* thread has deallocated something.                  */
          && bytes_freed_before != GC_bytes_freed
#       endif
        ) {
      LOCK();
      GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
      UNLOCK();
    }
    return count;
}
/* NOTE(review): the following lines are a FRAGMENT of a larger function */
/* (apparently GC_notify_or_invoke_finalizers): its signature and the    */
/* surrounding statics are missing before this point, and the body is    */
/* cut off after the GC_generate_random_valid_address() call.  Left      */
/* byte-identical; restore the full function from upstream finalize.c.   */
# ifdefined(THREADS) && !defined(KEEP_BACK_PTRS) \
&& !defined(MAKE_BACK_GRAPH) /* Quick check (while unlocked) for an empty finalization queue. */ if (!GC_should_invoke_finalizers()) return; # endif
LOCK();
/* This is a convenient place to generate backtraces if appropriate, */ /* since that code is not callable with the allocation lock. */ # ifdefined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH) if (GC_gc_no > last_back_trace_gc_no) { # ifdef KEEP_BACK_PTRS long i; /* Stops when GC_gc_no wraps; that's OK. */
last_back_trace_gc_no = GC_WORD_MAX; /* disable others. */ for (i = 0; i < GC_backtraces; ++i) { /* FIXME: This tolerates concurrent heap mutation, */ /* which may cause occasional mysterious results. */ /* We need to release the GC lock, since GC_print_callers */ /* acquires it. It probably shouldn't. */ void *current = GC_generate_random_valid_address();
/* NOTE(review): the following German-language disclaimer is extraneous
 * text — apparently residue from the web page this code was scraped
 * from — and is not part of the original source file.  Translation:
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge.  However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.  Note: the
 * color syntax highlighting is still experimental."
 * It should be removed from this file entirely. */