/*
 * ARCH_FUNC_PREFIX is prepended to function names when matching stack-trace
 * symbols below (some architectures prefix C symbols, e.g. with '.').
 * May be overridden by <asm/kfence.h>; defaults to the empty string.
 */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif
/*
 * Helper function to either print to a seq_file or to console.
 *
 * @seq: destination seq_file, or NULL to print to the console instead.
 * @fmt: printf-style format string, followed by its arguments.
 *
 * NOTE(review): the original body was truncated by the extraction (it declared
 * va_list args and then broke off); restored the canonical implementation —
 * confirm against the upstream file.
 */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}
/* * Get the number of stack entries to skip to get out of MM internals. @type is * optional, and if set to NULL, assumes an allocation or free stack.
*/ staticint get_stack_skipnr(constunsignedlong stack_entries[], int num_entries, constenum kfence_error_type *type)
{ char buf[64]; int skipnr, fallback = 0;
if (type) { /* Depending on error type, find different stack entries. */ switch (*type) { case KFENCE_ERROR_UAF: case KFENCE_ERROR_OOB: case KFENCE_ERROR_INVALID: /* * kfence_handle_page_fault() may be called with pt_regs * set to NULL; in that case we'll simply show the full * stack trace.
*/ return 0; case KFENCE_ERROR_CORRUPTION: case KFENCE_ERROR_INVALID_FREE: break;
}
}
for (skipnr = 0; skipnr < num_entries; skipnr++) { int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);
if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
!strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) { /* * In case of tail calls from any of the below to any of * the above, optimized by the compiler such that the * stack trace would omit the initial entry point below.
*/
fallback = skipnr + 1;
}
/* * The below list should only include the initial entry points * into the slab allocators. Includes the *_bulk() variants by * checking prefixes.
*/ if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc")) goto found;
} if (fallback < num_entries) return fallback;
found:
skipnr++; return skipnr < num_entries ? skipnr : 0;
}
/*
 * NOTE(review): fragment — the header of the enclosing function (presumably a
 * stack-printing helper taking @seq, @track and @show_alloc) is not visible in
 * this chunk; confirm against the full file. It prints the saved stack trace,
 * skipping allocator internals via get_stack_skipnr(), or a placeholder line
 * when no trace was recorded.
 */
if (track->num_stack_entries) { /* Skip allocation/free internals stack. */ int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
/* stack_trace_seq_print() does not exist; open code our own. */ for (; i < track->num_stack_entries; i++)
	seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
} else {
	seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
}
}
/* * Show bytes at @addr that are different from the expected canary values, up to * @max_bytes.
*/ staticvoid print_diff_canary(unsignedlong address, size_t bytes_to_show, conststruct kfence_metadata *meta)
{ constunsignedlong show_until_addr = address + bytes_to_show; const u8 *cur, *end;
/* Do not show contents of object nor read into following guard page. */
end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
: min(show_until_addr, PAGE_ALIGN(address)));
pr_cont("["); for (cur = (const u8 *)address; cur < end; cur++) { if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
pr_cont(" ."); elseif (no_hash_pointers)
pr_cont(" 0x%02x", *cur); else/* Do not leak kernel memory in non-debug builds. */
pr_cont(" !");
}
pr_cont(" ]");
}
/*
 * NOTE(review): fragment — the header of the enclosing function (presumably
 * kfence_report_error(), with @type and @meta among its parameters) is not
 * visible in this chunk, and the body continues past the last visible line;
 * confirm against the full file.
 */
/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */ if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) return;

if (meta)
	lockdep_assert_held(&meta->lock); /* * Because we may generate reports in printk-unfriendly parts of the * kernel, such as scheduler code, the use of printk() could deadlock. * Until such time that all printing code here is safe in all parts of * the kernel, accept the risk, and just get our message out (given the * system might already behave unpredictably due to the memory error). * As such, also disable lockdep to hide warnings, and avoid disabling * lockdep for the rest of the kernel.
 */
lockdep_off();
/*
 * NOTE(review): the following German website-disclaimer text was appended by
 * the extraction tool and is not part of the source. Translation: "The
 * information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of the
 * information provided is guaranteed. Note: the colored syntax display and
 * the measurement are still experimental."
 */