/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
/* Allocation burst count: number of excess KFENCE allocations per sample. */
static unsigned int kfence_burst __read_mostly;
module_param_named(burst, kfence_burst, uint, 0644);
/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);
/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata *kfence_metadata __read_mostly;
/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */
static struct kfence_metadata *kfence_metadata_init __read_mostly;
/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)

static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
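/*
 * Worked example (an illustration only, assuming the default
 * CONFIG_KFENCE_NUM_OBJECTS=255, i.e. SIZE = 512): with ~15% unique
 * allocations, P = (1 - e^(-2 * 38/512))^2 ~= 0.02; with ~85%,
 * P = (1 - e^(-2 * 217/512))^2 ~= 0.33, matching the 0.02-0.33 range above.
 */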
/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;
/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}
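/*
 * Note: callers pair these updates -- the guarded allocation path adds 1 for
 * the allocation's stack hash, and kfence_guarded_free() subtracts 1 again.
 */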
/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
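/*
 * Return true if the object is currently allocated, including objects whose
 * free is still pending via RCU (KFENCE_OBJECT_RCU_FREEING).
 */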
static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
{
	enum kfence_object_state state = READ_ONCE(meta->state);

	return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
}
/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;

	lockdep_assert_held(&meta->lock);

	/* Stack has been saved when calling rcu, skip. */
	if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
		goto out;

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}

	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

out:
	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}
	/*
	 * The canary may be written to part of the object memory, but it does
	 * not affect it. The user should initialize the object before using it.
	 */
	for (; addr < meta->addr; addr += sizeof(u64))
		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
	/*
	 * We'll iterate over each canary byte per-side until a corrupted byte
	 * is found. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
			break;
	}

	/*
	 * If the canary is corrupted in a certain 64 bytes, or the canary
	 * memory cannot be completely covered by multiple consecutive 64 bytes,
	 * it needs to be checked one by one.
	 */
	for (; addr < meta->addr; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
		if (unlikely(!check_canary_byte((u8 *)addr)))
			return;
	}
	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
			for (; addr - pageaddr < PAGE_SIZE; addr++) {
				if (!check_canary_byte((u8 *)addr))
					return;
			}
		}
	}
}
	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}
	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}
	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, will always
	 * return zero. We still benefit from enabling KFENCE as early as
	 * possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside
	 * is that the out-of-bounds accesses detected are deterministic for
	 * such allocations.
	 */
	if (random_right_allocate) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (random_fault)
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}
	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	check_canary(meta);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return (unsigned long)__kfence_pool;

	/*
	 * Set up object pages: they must have PGTY_slab set to avoid freeing
	 * them as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata_init[i];

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto reset_slab;

		addr += 2 * PAGE_SIZE;
	}
	/*
	 * Make kfence_metadata visible only when initialization is successful.
	 * Otherwise, if the initialization fails and kfence_metadata is freed,
	 * it may cause UAF in kfence_shutdown_cache().
	 */
	smp_store_release(&kfence_metadata, kfence_metadata_init);
	return 0;

reset_slab:
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(nth_page(pages, i));

		if (!i || (i % 2))
			continue;
#ifdef CONFIG_MEMCG
		slab->obj_exts = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}
	if (!addr) {
		/*
		 * The pool is live and will never be deallocated from this point on.
		 * Ignore the pool object from the kmemleak phys object tree, as it would
		 * otherwise overlap with allocations returned by kfence_alloc(), which
		 * are registered with kmemleak through the slab post-alloc hook.
		 */
		kmemleak_ignore_phys(__pa(__kfence_pool));
		return true;
	}

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
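/* Statistics shown via debugfs (typically /sys/kernel/debug/kfence/stats). */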
static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}
/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, -kfence_burst);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);
/* === Public interface ===================================================== */
void __init kfence_alloc_pool_and_metadata(void)
{
	if (!kfence_sample_interval)
		return;

	/*
	 * If the pool has already been initialized by arch, there is no need to
	 * re-allocate the memory pool.
	 */
	if (!__kfence_pool)
		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool) {
		pr_err("failed to allocate pool\n");
		return;
	}

	/* The memory allocated by memblock has been zeroed out. */
	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
	if (!kfence_metadata_init) {
		pr_err("failed to allocate metadata\n");
		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
		__kfence_pool = NULL;
	}
}
static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
	/* Pairs with release in kfence_init_pool(). */
	if (!smp_load_acquire(&kfence_metadata))
		return;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && kfence_obj_allocated(meta);
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}
	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}
	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations for this slab, if KFENCE has been disabled for
	 * this slab.
	 */
	if (s->flags & SLAB_SKIP_KFENCE)
		return NULL;
	allocation_gate = atomic_inc_return(&kfence_allocation_gate);
	if (allocation_gate > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif
	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}
	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}
	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}
#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->obj_exts.objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 * Save the stack trace here so that reports show where the user freed
	 * the object.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
		unsigned long flags;
		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}
out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}