// SPDX-License-Identifier: GPL-2.0 /* * Test cases for KMSAN. * For each test case checks the presence (or absence) of generated reports. * Relies on 'console' tracepoint to capture reports as they appear in the * kernel log. * * Copyright (C) 2021-2022, Google LLC. * Author: Alexander Potapenko <glider@google.com> *
*/
if (observed.ignore) return;
spin_lock_irqsave(&observed.lock, flags);
if (strnstr(buf, "BUG: KMSAN: ", len)) { /* * KMSAN report and related to the test. * * The provided @buf is not NUL-terminated; copy no more than * @len bytes and let strscpy() add the missing NUL-terminator.
*/
strscpy(observed.header, buf,
min(len + 1, sizeof(observed.header)));
WRITE_ONCE(observed.available, true);
observed.ignore = true;
}
spin_unlock_irqrestore(&observed.lock, flags);
}
/* Check if a report related to the test exists. */ staticbool report_available(void)
{ return READ_ONCE(observed.available);
}
/* Reset observed.available, so that the test can trigger another report. */ staticvoid report_reset(void)
{ unsignedlong flags;
/* Information we expect in a report. */ struct expect_report { constchar *error_type; /* Error type. */ /* * Kernel symbol from the error header, or NULL if no report is * expected.
*/ constchar *symbol;
};
/* Check observed report matches information in @r. */ staticbool report_matches(conststruct expect_report *r)
{
typeof(observed.header) expected_header; unsignedlong flags; bool ret = false; constchar *end; char *cur;
/* Title */
cur = expected_header;
end = &expected_header[sizeof(expected_header) - 1];
cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);
scnprintf(cur, end - cur, " in %s", r->symbol); /* The exact offset won't match, remove it; also strip module name. */
cur = strchr(expected_header, '+'); if (cur)
*cur = '\0';
spin_lock_irqsave(&observed.lock, flags); if (!report_available()) goto out; /* A new report is being captured. */
/* Finally match expected output to what we actually observed. */
ret = strstr(observed.header, expected_header);
out:
spin_unlock_irqrestore(&observed.lock, flags);
return ret;
}
/* ===== Test cases ===== */
/* Prevent replacing branch with select in LLVM. */ static noinline void check_true(char *arg)
{
pr_info("%s is true\n", arg);
}
/* * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
*/ staticvoid test_init_kmalloc(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect); int *ptr;
kunit_info(test, "initialized kmalloc test (no reports)\n");
ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
memset(ptr, 0, sizeof(*ptr));
USE(*ptr);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test case: ensure that kzalloc() returns initialized memory. */ staticvoid test_init_kzalloc(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect); int *ptr;
kunit_info(test, "initialized kzalloc test (no reports)\n");
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
USE(*ptr);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test case: ensure that local variables are uninitialized by default. */ staticvoid test_uninit_stack_var(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE(expect); volatileint cond;
/* Test case: ensure that local variables with initializers are initialized. */ staticvoid test_init_stack_var(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect); volatileint cond = 1;
kunit_info(test, "initialized stack variable (no reports)\n");
USE(cond);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
static noinline void two_param_fn(int arg1, int arg2)
{ int init = 0;
one_param_fn(init);
USE(arg1);
USE(arg2);
}
staticvoid test_params(struct kunit *test)
{ #ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL /* * With eager param/retval checking enabled, KMSAN will report an error * before the call to two_param_fn().
*/
EXPECTATION_UNINIT_VALUE_FN(expect, "test_params"); #else
EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn"); #endif volatileint uninit, init = 1;
kunit_info(test, "uninit passed through a function parameter (UMR report)\n");
two_param_fn(uninit, init);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
staticint signed_sum3(int a, int b, int c)
{ return a + b + c;
}
/* * Test case: ensure that uninitialized values are tracked through function * arguments.
*/ staticvoid test_uninit_multiple_params(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE(expect); volatilechar b = 3, c; volatileint a;
kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
USE(signed_sum3(a, b, c));
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Helper function to make an array uninitialized. */ static noinline void do_uninit_local_array(char *array, int start, int stop)
{ volatilechar uninit;
for (int i = start; i < stop; i++)
array[i] = uninit;
}
/* * Test case: ensure kmsan_check_memory() reports an error when checking * uninitialized memory.
*/ staticvoid test_uninit_kmsan_check_memory(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory"); volatilechar local_array[8];
kunit_info(
test, "kmsan_check_memory() called on uninit local (UMR report)\n");
do_uninit_local_array((char *)local_array, 5, 7);
/* * Test case: check that a virtual memory range created with vmap() from * initialized pages is still considered as initialized.
*/ staticvoid test_init_kmsan_vmap_vunmap(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect); constint npages = 2; struct page **pages; void *vbuf;
kunit_info(test, "pages initialized via vmap (no reports)\n");
pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); for (int i = 0; i < npages; i++)
pages[i] = alloc_page(GFP_KERNEL);
vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
memset(vbuf, 0xfe, npages * PAGE_SIZE); for (int i = 0; i < npages; i++)
kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);
if (vbuf)
vunmap(vbuf); for (int i = 0; i < npages; i++) { if (pages[i])
__free_page(pages[i]);
}
kfree(pages);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* * Test case: ensure that memset() can initialize a buffer allocated via * vmalloc().
*/ staticvoid test_init_vmalloc(struct kunit *test)
{
EXPECTATION_NO_REPORT(expect); int npages = 8; char *buf;
kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
buf = vmalloc(PAGE_SIZE * npages);
buf[0] = 1;
memset(buf, 0xfe, PAGE_SIZE * npages);
USE(buf[0]); for (int i = 0; i < npages; i++)
kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
vfree(buf);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test case: ensure that use-after-free reporting works. */ staticvoid test_uaf(struct kunit *test)
{
EXPECTATION_USE_AFTER_FREE(expect); volatileint value; volatileint *var;
kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
var = kmalloc(80, GFP_KERNEL);
var[3] = 0xfeedface;
kfree((int *)var); /* Copy the invalid value before checking it. */
value = var[3];
USE(value);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* * Test case: ensure that uninitialized values are propagated through per-CPU * memory.
*/ staticvoid test_percpu_propagate(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE(expect); volatileint uninit, check;
kunit_info(test, "uninit local stored to per_cpu memory (UMR report)\n");
/* * Test case: ensure that passing uninitialized values to printk() leads to an * error report.
*/ staticvoid test_printk(struct kunit *test)
{ #ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL /* * With eager param/retval checking enabled, KMSAN will report an error * before the call to pr_info().
*/
EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk"); #else
EXPECTATION_UNINIT_VALUE_FN(expect, "number"); #endif volatileint uninit;
kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
pr_info("%px contains %d\n", &uninit, uninit);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Prevent the compiler from inlining a memcpy() call. */ static noinline void *memcpy_noinline(volatilevoid *dst, constvolatilevoid *src, size_t size)
{ return memcpy((void *)dst, (constvoid *)src, size);
}
/* * Test case: ensure that memcpy() correctly copies uninitialized values between * aligned `src` and unaligned `dst`. * * Copying aligned 4-byte value to an unaligned one leads to touching two * aligned 4-byte values. This test case checks that KMSAN correctly reports an * error on the mentioned two values.
*/ staticvoid test_memcpy_aligned_to_unaligned(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned"); volatileint uninit_src; volatilechar dst[8] = { 0 };
/* * Test case: ensure that origin slots do not accidentally get overwritten with * zeroes during memcpy(). * * Previously, when copying memory from an aligned buffer to an unaligned one, * if there were zero origins corresponding to zero shadow values in the source * buffer, they could have ended up being copied to nonzero shadow values in the * destination buffer: * * memcpy(0xffff888080a00000, 0xffff888080900002, 8) * * src (0xffff888080900002): ..xx .... xx.. * src origins: o111 0000 o222 * dst (0xffff888080a00000): xx.. ..xx * dst origins: o111 0000 * (or 0000 o222) * * (here . stands for an initialized byte, and x for an uninitialized one. * * Ensure that this does not happen anymore, and for both destination bytes * the origin is nonzero (i.e. KMSAN reports an error).
*/ staticvoid test_memcpy_initialized_gap(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap"); volatilechar uninit_src[12]; volatilechar dst[8] = { 0 };
kunit_info(
test, "unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");
/* Test case: ensure that KMSAN does not access shadow memory out of bounds. */ staticvoid test_memset_on_guarded_buffer(struct kunit *test)
{ void *buf = vmalloc(PAGE_SIZE);
kunit_info(test, "memset() on ends of guarded buffer should not crash\n");
staticvoid test_long_origin_chain(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain"); /* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */ volatileint accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2]; int last = ARRAY_SIZE(accum) - 1;
kunit_info(
test, "origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n"); /* * We do not set accum[1] to 0, so the uninitializedness will be carried * over to accum[2..last].
*/
accum[0] = 1;
fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
kmsan_check_memory((void *)&accum[last], sizeof(int));
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* * Test case: ensure that saving/restoring/printing stacks to/from stackdepot * does not trigger errors. * * KMSAN uses stackdepot to store origin stack traces, that's why we do not * instrument lib/stackdepot.c. Yet it must properly mark its outputs as * initialized because other kernel features (e.g. netdev tracker) may also * access stackdepot from instrumented code.
*/ staticvoid test_stackdepot_roundtrip(struct kunit *test)
{ unsignedlong src_entries[16], *dst_entries; unsignedint src_nentries, dst_nentries;
EXPECTATION_NO_REPORT(expect);
depot_stack_handle_t handle;
kunit_info(test, "testing stackdepot roundtrip (no reports)\n");
/* * Test case: ensure that kmsan_unpoison_memory() and the instrumentation work * the same.
*/ staticvoid test_unpoison_memory(struct kunit *test)
{
EXPECTATION_UNINIT_VALUE_FN(expect, "test_unpoison_memory"); volatilechar a[4], b[4];
kunit_info(
test, "unpoisoning via the instrumentation vs. kmsan_unpoison_memory() (2 UMR reports)\n");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.