// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack depot - a stack trace storage that avoids duplication.
 *
 * Internally, stack depot maintains a hash table of unique stacktraces. The
 * stack traces themselves are stored contiguously one after another in a set
 * of separate page allocations.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on the code by Dmitry Chernenkov.
 */
/*
 * The pool_index is offset by 1 so the first record does not have a 0 handle.
 */
static unsigned int stack_max_pools __read_mostly =
	MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192);
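/*
 * Note: the MIN() above keeps the 1-based pool index representable in the
 * handle's DEPOT_POOL_INDEX_BITS field. As an illustration only: if
 * DEPOT_POOL_INDEX_BITS were 16, the bitfield limit would be 65535 and the
 * default cap would still be the smaller 8192.
 */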
/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
/* Hash table of stored stack records. */
static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack records. */
static void **stack_pools;
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
/* The lock must be held when performing pool or freelist modifications. */
static DEFINE_RAW_SPINLOCK(pool_lock);
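/*
 * Note: pool_offset starts out equal to DEPOT_POOL_SIZE, so the very first
 * allocation request sees a "full" current pool and goes through
 * depot_init_pool() to install the first real pool.
 */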
void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}
/* Initialize list_head's within the hash table. */
static void init_stack_table(unsigned long entries)
{
	unsigned long i;

	for (i = 0; i < entries; i++)
		INIT_LIST_HEAD(&stack_table[i]);
}
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * Print disabled message even if early init has not been requested:
	 * stack_depot_init() will not print one.
	 */
	if (stack_depot_disabled) {
		pr_info("disabled\n");
		return 0;
	}

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently used
	 * in fuzzing scenarios, which leads to a large number of different
	 * stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	/*
	 * Check if early init has been requested after setting
	 * stack_bucket_number_order: stack_depot_init() uses its value.
	 */
	if (!__stack_depot_early_init_requested)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash().
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct list_head),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}
	if (!entries) {
		/*
		 * Obtain the number of entries that was calculated by
		 * alloc_large_system_hash().
		 */
		entries = stack_hash_mask + 1;
	}
	init_stack_table(entries);

	pr_info("allocating space for %u stack pools via memblock\n",
		stack_max_pools);
	stack_pools = memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE);
	if (!stack_pools) {
		pr_err("stack pools allocation failed, disabling\n");
		memblock_free(stack_table, entries * sizeof(struct list_head));
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}
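/*
 * Typical boot-time ordering, sketched for illustration (the call sites live
 * outside this file):
 *
 *	stack_depot_request_early_init();	// early code that wants the depot
 *	...
 *	stack_depot_early_init();		// once, from mm_init()
 *
 * Users that only decide later whether they need stack depot call
 * stack_depot_init() instead, which allocates the hash table via kvcalloc().
 */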
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;
/*
 * Initializes new stack pool, and updates the list of pools.
 */
static bool depot_init_pool(void **prealloc)
{
	lockdep_assert_held(&pool_lock);

	if (unlikely(pools_num >= stack_max_pools)) {
		/* Bail out if we reached the pool limit. */
		WARN_ON_ONCE(pools_num > stack_max_pools); /* should never happen */
		WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
		WARN_ONCE(1, "Stack depot reached limit capacity");
		return false;
	}

	if (!new_pool && *prealloc) {
		/* We have preallocated memory, use it. */
		WRITE_ONCE(new_pool, *prealloc);
		*prealloc = NULL;
	}

	if (!new_pool)
		return false; /* new_pool and *prealloc are NULL */

	/* Save reference to the pool to be used by depot_fetch_stack(). */
	stack_pools[pools_num] = new_pool;

	/*
	 * Stack depot tries to keep an extra pool allocated even before it runs
	 * out of space in the currently used pool.
	 *
	 * To indicate that a new preallocation is needed new_pool is reset to
	 * NULL; do not reset to NULL if we have reached the maximum number of
	 * pools.
	 */
	if (pools_num < stack_max_pools)
		WRITE_ONCE(new_pool, NULL);
	else
		WRITE_ONCE(new_pool, STACK_DEPOT_POISON);

	/* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
	WRITE_ONCE(pools_num, pools_num + 1);
	ASSERT_EXCLUSIVE_WRITER(pools_num);

	pool_offset = 0;

	return true;
}
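/*
 * Capacity sketch, assuming for illustration that DEPOT_POOL_SIZE is an
 * order-2 (16 KB) page allocation: with the default cap of 8192 pools the
 * depot can hold at most 8192 * 16 KB = 128 MB of stack records before
 * depot_init_pool() starts failing and new traces are no longer stored.
 */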
/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
	lockdep_assert_held(&pool_lock);

	/*
	 * If a new pool is already saved or the maximum number of
	 * pools is reached, do not use the preallocated memory.
	 */
	if (new_pool)
		return;

	WRITE_ONCE(new_pool, *prealloc);
	*prealloc = NULL;
}
/*
 * Try to initialize a new stack record from the current pool, a cached pool, or
 * the current pre-allocation.
 */
static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
{
	struct stack_record *stack;
	void *current_pool;
	u32 pool_index;

	lockdep_assert_held(&pool_lock);

	if (pool_offset + size > DEPOT_POOL_SIZE) {
		if (!depot_init_pool(prealloc))
			return NULL;
	}

	if (WARN_ON_ONCE(pools_num < 1))
		return NULL;
	pool_index = pools_num - 1;
	current_pool = stack_pools[pool_index];
	if (WARN_ON_ONCE(!current_pool))
		return NULL;
/* Try to find next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
{
	struct stack_record *stack;

	lockdep_assert_held(&pool_lock);

	if (list_empty(&free_stacks))
		return NULL;

	/*
	 * We maintain the invariant that the elements in front are least
	 * recently used, and are therefore more likely to be associated with an
	 * RCU grace period in the past. Consequently it is sufficient to only
	 * check the first entry.
	 */
	stack = list_first_entry(&free_stacks, struct stack_record, free_list);
	if (!poll_state_synchronize_rcu(stack->rcu_state))
		return NULL;
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash,
		  depot_flags_t flags, void **prealloc)
{
	struct stack_record *stack = NULL;
	size_t record_size;

	lockdep_assert_held(&pool_lock);

	/* This should already be checked by public API entry points. */
	if (WARN_ON_ONCE(!nr_entries))
		return NULL;

	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
	if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
		nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;

	if (flags & STACK_DEPOT_FLAG_GET) {
		/*
		 * Evictable entries have to allocate the max. size so they may
		 * safely be re-used by differently sized allocations.
		 */
		record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
		stack = depot_pop_free();
	} else {
		record_size = depot_stack_record_size(stack, nr_entries);
	}

	if (!stack) {
		stack = depot_pop_free_pool(prealloc, record_size);
		if (!stack)
			return NULL;
	}

	/* Save the stack trace. */
	stack->hash = hash;
	stack->size = nr_entries;
	/* stack->handle is already filled in by depot_pop_free_pool(). */
	memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));

	if (flags & STACK_DEPOT_FLAG_GET) {
		refcount_set(&stack->count, 1);
		counters[DEPOT_COUNTER_REFD_ALLOCS]++;
		counters[DEPOT_COUNTER_REFD_INUSE]++;
	} else {
		/* Warn on attempts to switch to refcounting this entry. */
		refcount_set(&stack->count, REFCOUNT_SATURATED);
		counters[DEPOT_COUNTER_PERSIST_COUNT]++;
		counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
	}

	/*
	 * Let KMSAN know the stored stack record is initialized. This shall
	 * prevent false positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(stack, record_size);

	return stack;
}
	if (pool_index >= pools_num_cached) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
		     pool_index, pools_num_cached, handle);
		return NULL;
	}

	pool = stack_pools[pool_index];
	if (WARN_ON(!pool))
		return NULL;

	stack = pool + offset;
	if (WARN_ON(!refcount_read(&stack->count)))
		return NULL;

	return stack;
}
/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
{
	unsigned long flags;

	/*
	 * Remove the entry from the hash list. Concurrent list traversal may
	 * still observe the entry, but since the refcount is zero, this entry
	 * will no longer be considered as valid.
	 */
	list_del_rcu(&stack->hash_list);

	/*
	 * Due to being used from constrained contexts such as the allocators,
	 * NMI, or even RCU itself, stack depot cannot rely on primitives that
	 * would sleep (such as synchronize_rcu()) or recursively call into
	 * stack depot again (such as call_rcu()).
	 *
	 * Instead, get an RCU cookie, so that we can ensure this entry isn't
	 * moved onto another list until the next grace period, and concurrent
	 * RCU list traversal remains safe.
	 */
	stack->rcu_state = get_state_synchronize_rcu();

	/*
	 * Add the entry to the freelist tail, so that older entries are
	 * considered first - their RCU cookie is more likely to no longer be
	 * associated with the current grace period.
	 */
	list_add_tail(&stack->free_list, &free_stacks);
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}
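/*
 * jhash2() consumes an array of u32 words, so the length passed above is the
 * size of the entries buffer in bytes divided by sizeof(u32) - e.g. two words
 * per saved frame on a 64-bit kernel.
 */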
/*
 * Non-instrumented version of memcmp().
 * Does not check the lexicographical order, only the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}
/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct list_head *bucket,
					      unsigned long *entries, int size,
					      u32 hash, depot_flags_t flags)
{
	struct stack_record *stack, *ret = NULL;

	/*
	 * Stack depot may be used from instrumentation that instruments RCU or
	 * tracing itself; use variant that does not call into RCU and cannot be
	 * traced.
	 *
	 * Note: Such use cases must take care when using refcounting to evict
	 * unused entries, because the stack record free-then-reuse code paths
	 * do call into RCU.
	 */
	rcu_read_lock_sched_notrace();

	list_for_each_entry_rcu(stack, bucket, hash_list) {
		if (stack->hash != hash || stack->size != size)
			continue;

		/*
		 * This may race with depot_free_stack() accessing the freelist
		 * management state unioned with @entries. The refcount is zero
		 * in that case and the below refcount_inc_not_zero() will fail.
		 */
		if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
			continue;

		/*
		 * Try to increment refcount. If this succeeds, the stack record
		 * is valid and has not yet been freed.
		 *
		 * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
		 * to then call stack_depot_put() later, and we can assume that
		 * a stack record is never placed back on the freelist.
		 */
		if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
			continue;

		ret = stack;
		break;
	}

	rcu_read_unlock_sched_notrace();

	return ret;
}
	if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
		return 0;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stack depot growth.
	 *
	 * Since use of filter_irq_stacks() is a requirement to ensure stack
	 * depot can efficiently deduplicate interrupt stacks, always
	 * filter_irq_stacks() to simplify all callers' use of stack depot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		return 0;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/* Fast path: look the stack trace up without locking. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (found)
		goto exit;

	/*
	 * Allocate memory for a new pool if required now:
	 * we won't be able to do that under the lock.
	 */
	if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
		page = alloc_pages(gfp_nested_mask(alloc_flags), DEPOT_POOL_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	if (in_nmi() || !allow_spin) {
		/* We can never allocate in NMI context. */
		WARN_ON_ONCE(can_alloc);
		/* Best effort; bail if we fail to take the lock. */
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			goto exit;
	} else {
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	printk_deferred_enter();

	/* Try to find again, to avoid concurrently inserting duplicates. */
	found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);

		if (new) {
			/*
			 * This releases the stack record into the bucket and
			 * makes it visible to readers in find_stack().
			 */
			list_add_rcu(&new->hash_list, bucket);
			found = new;
		}
	}

	if (prealloc) {
		/*
		 * Either stack depot already contains this stack trace, or
		 * depot_alloc_stack() did not consume the preallocated memory.
		 * Try to keep the preallocated memory for future.
		 */
		depot_keep_new_pool(&prealloc);
	}

	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Stack depot didn't use this memory, free it. */
		if (!allow_spin)
			free_pages_nolock(virt_to_page(prealloc), DEPOT_POOL_ORDER);
		else
			free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
	}
	if (found)
		handle = found->handle.handle;
	return handle;
}
EXPORT_SYMBOL_GPL(stack_depot_save_flags);
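/*
 * Minimal usage sketch for a caller that stores traces without refcounting
 * (illustrative only; the variable names below are not part of this file):
 *
 *	unsigned long entries[64];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
 *					STACK_DEPOT_FLAG_CAN_ALLOC);
 *
 * A zero handle means the trace could not be stored.
 */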
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	struct stack_record *stack;

	*entries = NULL;
	/*
	 * Let KMSAN know *entries is initialized. This shall prevent false
	 * positive reports if instrumented code accesses it.
	 */
	kmsan_unpoison_memory(entries, sizeof(*entries));

	if (!handle || stack_depot_disabled)
		return 0;

	stack = depot_fetch_stack(handle);
	/*
	 * Should never be NULL, otherwise this is a use-after-put (or just a
	 * corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
		return 0;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
void stack_depot_put(depot_stack_handle_t handle)
{
	struct stack_record *stack;

	if (!handle || stack_depot_disabled)
		return;

	stack = depot_fetch_stack(handle);
	/*
	 * Should always be able to find the stack record, otherwise this is an
	 * unbalanced put attempt (or corrupt handle).
	 */
	if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
		return;

	if (refcount_dec_and_test(&stack->count))
		depot_free_stack(stack);
}
EXPORT_SYMBOL_GPL(stack_depot_put);
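/*
 * Sketch of the evictable (refcounted) lifecycle, for illustration:
 *
 *	handle = stack_depot_save_flags(entries, nr, GFP_KERNEL,
 *					STACK_DEPOT_FLAG_CAN_ALLOC |
 *					STACK_DEPOT_FLAG_GET);
 *	...
 *	stack_depot_put(handle);
 *
 * Each save with STACK_DEPOT_FLAG_GET either sets a new record's refcount to
 * 1 or increments the refcount of an existing record in find_stack(); the
 * final stack_depot_put() drops it to zero and depot_free_stack() moves the
 * record to the freelist.
 */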
static int stats_show(struct seq_file *seq, void *v)
{
	/*
	 * data race ok: These are just statistics counters, and approximate
	 * statistics are ok for debugging.
	 */
	seq_printf(seq, "pools: %d\n", data_race(pools_num));
	for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));

	return 0;
}