#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY) /* * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions * for the sites they want to instrument. * * If we have a compiler that can instrument meminstrinsics, never override * these, so that non-instrumented files can safely consider them as builtins.
*/ #undef memset void *memset(void *addr, int c, size_t len)
{ if (!kasan_check_range(addr, len, true, _RET_IP_)) return NULL;
/* * Perform shadow offset calculation based on untagged address, as * some of the callers (e.g. kasan_poison_new_object) pass tagged * addresses to this function.
*/
addr = kasan_reset_tag(addr);
if (WARN_ON((unsignedlong)addr & KASAN_GRANULE_MASK)) return; if (WARN_ON(size & KASAN_GRANULE_MASK)) return;
/* * Perform shadow offset calculation based on untagged address, as * some of the callers (e.g. kasan_unpoison_new_object) pass tagged * addresses to this function.
*/
addr = kasan_reset_tag(addr);
if (WARN_ON((unsignedlong)addr & KASAN_GRANULE_MASK)) return;
/* Unpoison all granules that cover the object. */
kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);
/* Partially poison the last granule for the generic mode. */ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
kasan_poison_last_granule(addr, size);
}
if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE)) return NOTIFY_BAD;
switch (action) { case MEM_GOING_ONLINE: { void *ret;
/* * If shadow is mapped already than it must have been mapped * during the boot. This could happen if we onlining previously * offlined memory.
*/ if (shadow_mapped(shadow_start)) return NOTIFY_OK;
ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
shadow_end, GFP_KERNEL,
PAGE_KERNEL, VM_NO_GUARD,
pfn_to_nid(mem_data->start_pfn),
__builtin_return_address(0)); if (!ret) return NOTIFY_BAD;
kmemleak_ignore(ret); return NOTIFY_OK;
} case MEM_CANCEL_ONLINE: case MEM_OFFLINE: { struct vm_struct *vm;
/* * shadow_start was either mapped during boot by kasan_init() * or during memory online by __vmalloc_node_range(). * In the latter case we can use vfree() to free shadow. * Non-NULL result of the find_vm_area() will tell us if * that was the second case. * * Currently it's not possible to free shadow mapped * during boot by kasan_init(). It's because the code * to do that hasn't been written yet. So we'll just * leak the memory.
*/
vm = find_vm_area((void *)shadow_start); if (vm)
vfree((void *)shadow_start);
}
}
/*
 * NOTE(review): corrupted span. The lines below appear to be the tail of
 * kasan_populate_vmalloc() (UML fast path, shadow population, barrier
 * rationale) fused with the tail of kasan_depopulate_vmalloc_pte() (the
 * __free_page()/lazy-MMU lines). Both function headers and the statements
 * between the two tails were lost during extraction. Code is kept
 * byte-identical; restore the originals from the project tree before
 * building.
 */
/* * User Mode Linux maps enough shadow memory for all of virtual memory * at boot, so doesn't need to allocate more on vmalloc, just clear it. * * The remaining CONFIG_UML checks in this file exist for the same * reason.
 */ if (IS_ENABLED(CONFIG_UML)) {
__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start); return 0;
}
/* Populate the shadow range, then make the new mappings visible. */
ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask); if (ret) return ret;
flush_cache_vmap(shadow_start, shadow_end);
/* * We need to be careful about inter-cpu effects here. Consider: * * CPU#0 CPU#1 * WRITE_ONCE(p, vmalloc(100)); while (x = READ_ONCE(p)) ; * p[99] = 1; * * With compiler instrumentation, that ends up looking like this: * * CPU#0 CPU#1 * // vmalloc() allocates memory * // let a = area->addr * // we reach kasan_populate_vmalloc * // and call kasan_unpoison: * STORE shadow(a), unpoison_val * ... * STORE shadow(a+99), unpoison_val x = LOAD p * // rest of vmalloc process <data dependency> * STORE p, a LOAD shadow(x+99) * * If there is no barrier between the end of unpoisoning the shadow * and the store of the result to p, the stores could be committed * in a different order by CPU#0, and CPU#1 could erroneously observe * poison in the shadow. * * We need some sort of barrier between the stores. * * In the vmalloc() case, this is provided by a smp_wmb() in * clear_vm_uninitialized_flag(). In the per-cpu allocator and in * get_vm_area() and friends, the caller gets shadow allocated but * doesn't have any pages mapped into the virtual address space that * has been reserved. Mapping those pages in will involve taking and * releasing a page-table lock, which will provide the barrier.
 */
/*
 * NOTE(review): the lines below belong to kasan_depopulate_vmalloc_pte(),
 * not to the barrier discussion above — presumably the PTE-clearing
 * prologue (locking, ptep_get, pte_clear) is missing. Verify against the
 * project tree.
 */
if (likely(!none))
__free_page(pfn_to_page(pte_pfn(pte)));
arch_enter_lazy_mmu_mode();
return 0;
}
/* * Release the backing for the vmalloc region [start, end), which * lies within the free region [free_region_start, free_region_end). * * This can be run lazily, long after the region was freed. It runs * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap * infrastructure. * * How does this work? * ------------------- * * We have a region that is page aligned, labeled as A. * That might not map onto the shadow in a way that is page-aligned: * * start end * v v * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc * -------- -------- -------- -------- -------- * | | | | | * | | | /-------/ | * \-------\|/------/ |/---------------/ * ||| || * |??AAAAAA|AAAAAAAA|AA??????| < shadow * (1) (2) (3) * * First we align the start upwards and the end downwards, so that the * shadow of the region aligns with shadow page boundaries. In the * example, this gives us the shadow page (2). This is the shadow entirely * covered by this allocation. * * Then we have the tricky bits. We want to know if we can free the * partially covered shadow pages - (1) and (3) in the example. For this, * we are given the start and end of the free region that contains this * allocation. Extending our previous example, we could have: * * free_region_start free_region_end * | start end | * v v v v * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc * -------- -------- -------- -------- -------- * | | | | | * | | | /-------/ | * \-------\|/------/ |/---------------/ * ||| || * |FFAAAAAA|AAAAAAAA|AAF?????| < shadow * (1) (2) (3) * * Once again, we align the start of the free region up, and the end of * the free region down so that the shadow is page aligned. So we can free * page (1) - we know no allocation currently uses anything in that page, * because all of it is in the vmalloc free region. But we cannot free * page (3), because we can't be sure that the rest of it is unused. 
* * We only consider pages that contain part of the original region for * freeing: we don't try to free other pages from the free region or we'd * end up trying to free huge chunks of virtual address space. * * Concurrency * ----------- * * How do we know that we're not freeing a page that is simultaneously * being used for a fresh allocation in kasan_populate_vmalloc(_pte)? * * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running * at the same time. While we run under free_vmap_area_lock, the population * code does not. * * free_vmap_area_lock instead operates to ensure that the larger range * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and * the per-cpu region-finding algorithm both run under free_vmap_area_lock, * no space identified as free will become used while we are running. This * means that so long as we are careful with alignment and only free shadow * pages entirely covered by the free region, we will not run in to any * trouble - any simultaneous allocations will be for disjoint regions.
*/
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	/*
	 * NOTE(review): the region alignment and mem-to-shadow computation
	 * below were missing from the extracted chunk (leaving shadow_start/
	 * shadow_end uninitialized); restored per mainline — verify.
	 */

	/* Align the region so that its shadow covers whole shadow pages. */
	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	/*
	 * Extend over a partially covered shadow page only when the whole
	 * page lies inside the surrounding free region (see the diagram in
	 * the comment above).
	 */
	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;

		/* UML maps all shadow at boot: just reset it, don't unmap. */
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT,
				 shadow_end - shadow_start);
			return;
		}

		if (flags & KASAN_VMALLOC_PAGE_RANGE)
			apply_to_existing_page_range(&init_mm,
					(unsigned long)shadow_start,
					size, kasan_depopulate_vmalloc_pte,
					NULL);

		if (flags & KASAN_VMALLOC_TLB_FLUSH)
			flush_tlb_kernel_range((unsigned long)shadow_start,
					       (unsigned long)shadow_end);
	}
}
/*
 * Unpoison the shadow for a vmalloc region and, for the software tag-based
 * mode, assign a fresh random tag to it. Returns the (possibly retagged)
 * start pointer that the caller must use from now on.
 */
void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	/*
	 * NOTE(review): the tail of this function (tagging + unpoison) was
	 * truncated in the extracted chunk; restored to match mainline —
	 * verify against the project tree.
	 */
	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
/* * Poison the shadow for a vmalloc region. Called as part of the * freeing process at the time the region is freed.
*/ void __kasan_poison_vmalloc(constvoid *start, unsignedlong size)
{ if (!kasan_arch_is_ready()) return;
/*
 * NOTE(review): the trailing text below is extraction residue (a German
 * website disclaimer), not part of this source file; wrapped in a comment
 * so the file remains parseable. Translation: "The information on this
 * website was carefully compiled to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax highlighting and
 * the measurement are still experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem
 * Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die Messung
 * sind noch experimentell.
 */