// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since the
 * interface with which KCSAN's reports are obtained is via the console, this is
 * the output we should verify. For each test case checks the presence (or
 * absence) of generated reports. Relies on 'console' tracepoint to capture
 * reports as they appear in the kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */
/*
 * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
 * least one race is reported.
 */
end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);
/* Signal start; release potential initialization of shared data. */
smp_store_release(&access_kernels[0], func1);
smp_store_release(&access_kernels[1], func2);
}
/* End test checking loop. */ static __no_kcsan inlinebool
end_test_checks(bool stop)
{ if (!stop && time_before(jiffies, end_time)) { /* Continue checking */
might_sleep(); returnfalse;
}
kcsan_enable_current(); returntrue;
}
/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan staticvoid probe_console(void *ignore, constchar *buf, size_t len)
{ unsignedlong flags; int nlines;
/* * Note that KCSAN reports under a global lock, so we do not risk the * possibility of having multiple reports interleaved. If that were the * case, we'd expect tests to fail.
*/
/* Check if a report related to the test exists. */
__no_kcsan staticbool report_available(void)
{ return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}
/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses. */
	struct {
		void *fn;	/* Function pointer to expected function of top frame. */
		void *addr;	/* Address of access; unchecked if NULL. */
		size_t size;	/* Size of access; unchecked if @addr is NULL. */
		int type;	/* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
/*
 * Check that the observed console report matches the information in @r:
 * builds the expected report fragments into @expect and substring-matches
 * them against the captured console lines under observed.lock.
 */
__no_kcsan staticbool __report_matches(conststruct expect_report *r)
{ constbool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT; bool ret = false; unsignedlong flags;
typeof(*observed.lines) *expect; constchar *end; char *cur; int i;
/* Double-checked locking. */ if (!report_available()) returnfalse;
/* Heap-allocate the scratch buffer; it matches the observed buffer's size. */
expect = kmalloc(sizeof(observed.lines), GFP_KERNEL); if (WARN_ON(!expect)) returnfalse;
/* Generate expected report contents. */
/* Title */
cur = expect[0];
end = &expect[0][sizeof(expect[0]) - 1];
cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
is_assert ? "assert: race" : "data-race"); if (r->access[1].fn) { char tmp[2][64]; int cmp;
/* Expect lexicographically sorted function names in title. */
scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
cmp = strcmp(tmp[0], tmp[1]);
cur += scnprintf(cur, end - cur, "%ps / %ps",
cmp < 0 ? r->access[0].fn : r->access[1].fn,
cmp < 0 ? r->access[1].fn : r->access[0].fn);
} else {
scnprintf(cur, end - cur, "%pS", r->access[0].fn); /* The exact offset won't match, remove it. */
cur = strchr(expect[0], '+'); if (cur)
*cur = '\0';
}
/* Access 1 */
cur = expect[1];
end = &expect[1][sizeof(expect[1]) - 1]; if (!r->access[1].fn)
cur += scnprintf(cur, end - cur, "race at unknown origin, with ");
/*
 * NOTE(review): this chunk appears garbled by extraction — the enclosing
 * `for (i = 0; i < 2; ...)` loop header and the code deriving access_type /
 * access_type_aux are missing (see the bare `break` below and `i` used
 * without initialization). Also note the fused tokens above ("staticbool",
 * "constbool", "unsignedlong", "returnfalse"). TODO: restore the missing
 * lines from the original file before this can compile.
 */
if (i == 1) { /* Access 2 */
cur = expect[2];
end = &expect[2][sizeof(expect[2]) - 1];
if (!r->access[1].fn) { /* Dummy string if no second access is available. */
strcpy(cur, ""); break;
}
}
cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
access_type_aux);
if (r->access[i].addr) /* Address is optional. */
cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
r->access[i].addr, r->access[i].size);
}
spin_lock_irqsave(&observed.lock, flags); if (!report_available()) goto out; /* A new report is being captured. */
/* Finally match expected output to what we actually observed. */
ret = strstr(observed.lines[0], expect[0]) && /* Access info may appear in any order. */
((strstr(observed.lines[1], expect[1]) &&
strstr(observed.lines[2], expect[2])) ||
(strstr(observed.lines[1], expect[2]) &&
strstr(observed.lines[2], expect[1])));
out:
spin_unlock_irqrestore(&observed.lock, flags);
kfree(expect); return ret;
}
#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
#define report_matches __report_matches
#endif
/* ===== Test kernels ===== */
/* Shared state raced on by the test kernels below. */
static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static long __data_racy test_data_racy;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);
/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v)
{
	WRITE_ONCE(test_sink, v);
}
/*
 * Generates a delay and some accesses that enter the runtime but do not produce
 * data races.
 */
static noinline void test_delay(int iter)
{
	while (iter--)
		sink_value(READ_ONCE(test_sink));
}
/*
 * Atomically increments test_var via the compiler builtin with relaxed memory
 * order, allowing tests to race this against plain (non-atomic) accesses.
 */
static noinline void test_kernel_atomic_rmw(void)
{ /* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}
/* * Scoped assertions do trigger anywhere in scope. However, the report should * still only point at the start of the scope.
*/ static noinline void test_enter_scope(void)
{ int x = 0;
/*
 * Flips a single high bit of test_var between nestable-atomic begin/end
 * markers, so the read-write is treated as atomic and races between
 * concurrent read-writes are not reported.
 */
static noinline void test_kernel_xor_1bit(void)
{ /* Do not report data races between the read-writes. */
kcsan_nestable_atomic_begin();
test_var ^= 0x10000;
kcsan_nestable_atomic_end();
}
/*
 * Generates a test kernel that, if @acquire succeeds, performs 100 plain
 * increments of test_var (with @flag available to the acquire/release
 * expressions), then executes @release and a short delay.
 *
 * Fix: the extraction fused the '\' line continuations into the middle of
 * lines, which is invalid preprocessor syntax; restored one macro line per
 * statement with trailing continuations. Content is unchanged.
 */
#define TEST_KERNEL_LOCKED(name, acquire, release)		\
	static noinline void test_kernel_##name(void)		\
	{							\
		long *flag = &test_struct.val[0];		\
		long v = 0;					\
		if (!(acquire))					\
			return;					\
		while (v++ < 100) {				\
			test_var++;				\
			barrier();				\
		}						\
		release;					\
		test_delay(10);					\
	}
/*
 * Tests that various barriers have the expected effect on internal state. Not
 * exhaustive on atomic_t operations. Unlike the selftest, also checks for
 * too-strict barrier instrumentation; these can be tolerated, because it does
 * not cause false positives, but at least we should be aware of such cases.
 *
 * NOTE(review): this chunk appears garbled by extraction: "staticvoid" is a
 * fused token, "¤t" below is a mis-encoded "&current", and from the
 * test_kernel_write_nochange()/begin_test_checks() lines onwards the body of
 * a different (value-change) test appears spliced in — match_expect,
 * expect_rw and expect_ww are not declared here. TODO: restore from the
 * original file.
 */
staticvoid test_barrier_nothreads(struct kunit *test)
{ #ifdef CONFIG_KCSAN_WEAK_MEMORY struct kcsan_scoped_access *reorder_access = &curren;t->kcsan_ctx.reorder_access; #else struct kcsan_scoped_access *reorder_access = NULL; #endif
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
atomic_t dummy = ATOMIC_INIT(0);
/*
 * Lockdep initialization can strengthen certain locking operations due
 * to calling into instrumented files; "warm up" our locks.
 */
spin_lock(&test_spinlock);
spin_unlock(&test_spinlock);
mutex_lock(&test_mutex);
mutex_unlock(&test_mutex);
/* Force creating a valid entry in reorder_access first. */
test_var = 0; while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
__kcsan_check_read(&test_var, sizeof(test_var));
KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));
kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */
test_kernel_write_nochange(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange, test_kernel_read); do {
match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect)); if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
KUNIT_EXPECT_FALSE(test, match_expect); else
KUNIT_EXPECT_TRUE(test, match_expect);
}
/* * Test that the rules where the KCSAN_REPORT_VALUE_CHANGE_ONLY option should * never apply work.
*/
__no_kcsan staticvoid test_novalue_change_exception(struct kunit *test)
{ struct expect_report expect_rw = {
.access = {
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
},
}; struct expect_report expect_ww = {
.access = {
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
}; bool match_expect = false;
test_kernel_write_nochange_rcu(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read); do {
match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect));
KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Test that data races of unknown origin are reported. */
__no_kcsan staticvoid test_unknown_origin(struct kunit *test)
{ struct expect_report expect = {
.access = {
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
{ NULL },
},
}; bool match_expect = false;
begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read); do {
match_expect = report_matches(&expect);
} while (!end_test_checks(match_expect)); if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
KUNIT_EXPECT_TRUE(test, match_expect); else
KUNIT_EXPECT_FALSE(test, match_expect);
}
/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan staticvoid test_write_write_assume_atomic(struct kunit *test)
{ struct expect_report expect = {
.access = {
{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
}; bool match_expect = false;
begin_test_checks(test_kernel_write, test_kernel_write); do {
sink_value(READ_ONCE(test_var)); /* induce value-change */
match_expect = report_matches(&expect);
} while (!end_test_checks(match_expect)); if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
KUNIT_EXPECT_FALSE(test, match_expect); else
KUNIT_EXPECT_TRUE(test, match_expect);
}
/* * Test that data races with writes larger than word-size are always reported, * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
*/
__no_kcsan staticvoid test_write_write_struct(struct kunit *test)
{ struct expect_report expect = {
.access = {
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
},
}; bool match_expect = false;
begin_test_checks(test_kernel_write_struct, test_kernel_write_struct); do {
match_expect = report_matches(&expect);
} while (!end_test_checks(match_expect));
KUNIT_EXPECT_TRUE(test, match_expect);
}
/* * Test that data races where only one write is larger than word-size are always * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
*/
__no_kcsan staticvoid test_write_write_struct_part(struct kunit *test)
{ struct expect_report expect = {
.access = {
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
},
}; bool match_expect = false;
begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part); do {
match_expect = report_matches(&expect);
} while (!end_test_checks(match_expect));
KUNIT_EXPECT_TRUE(test, match_expect);
}
/* Test that races with atomic accesses never result in reports. */
__no_kcsan staticvoid test_read_atomic_write_atomic(struct kunit *test)
{ bool match_never = false;
begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic); do {
match_never = report_available();
} while (!end_test_checks(match_never));
KUNIT_EXPECT_FALSE(test, match_never);
}
/*
 * Test that a race with an atomic and plain access result in reports.
 *
 * NOTE(review): this chunk appears spliced by extraction: the expect_report
 * below belongs to the plain-read vs. atomic-write test, but the loop body
 * references test_kernel_assert_access_scoped, match_expect_start /
 * match_expect_inscope and expect_start1/expect_start2/expect_inscope, which
 * are not declared here and belong to a scoped-assert test. "staticvoid" is a
 * fused token. TODO: restore both functions from the original file.
 */
__no_kcsan staticvoid test_read_plain_atomic_write(struct kunit *test)
{ struct expect_report expect = {
.access = {
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
},
}; bool match_expect = false;
begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */ do {
match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
match_expect_inscope |= report_matches(&expect_inscope);
} while (!end_test_checks(match_expect_inscope));
KUNIT_EXPECT_TRUE(test, match_expect_start);
KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}
/* * jiffies is special (declared to be volatile) and its accesses are typically * not marked; this test ensures that the compiler nor KCSAN gets confused about * jiffies's declaration on different architectures.
*/
__no_kcsan staticvoid test_jiffies_noreport(struct kunit *test)
{ bool match_never = false;
begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader); do {
match_never = report_available();
} while (!end_test_checks(match_never));
KUNIT_EXPECT_FALSE(test, match_never);
}
/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan staticvoid test_seqlock_noreport(struct kunit *test)
{ bool match_never = false;
begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer); do {
match_never = report_available();
} while (!end_test_checks(match_never));
KUNIT_EXPECT_FALSE(test, match_never);
}
/*
 * Test atomic builtins work and required instrumentation functions exist. We
 * also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 *
 * NOTE(review): this chunk appears spliced by extraction: the function opens
 * with the atomic-builtins begin_test_checks(), but the remainder belongs to
 * a wrong-memorder test — expect and match_expect are not declared here, and
 * the opening do-loop is never closed. "staticvoid" is a fused token. TODO:
 * restore both functions from the original file.
 */
staticvoid test_atomic_builtins(struct kunit *test)
{ bool match_never = false;
begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins); do { long tmp;
test_struct.val[0] = 0; /* init unlocked */
begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
test_kernel_atomic_builtin_wrong_memorder); do {
match_expect = report_matches_any_reordered(&expect);
} while (!end_test_checks(match_expect)); if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
KUNIT_EXPECT_TRUE(test, match_expect); else
KUNIT_EXPECT_FALSE(test, match_expect);
}
/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 *
 * NOTE(review): this chunk is truncated — the tail computing the next thread
 * count and writing @desc is missing, and "staticconstvoid", "constlong" and
 * "elseif" are fused tokens from extraction. TODO: restore from the original
 * file.
 */
staticconstvoid *nthreads_gen_params(constvoid *prev, char *desc)
{ long nthreads = (long)prev;
if (!preempt_model_preemptible() ||
!IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) { /*
 * Without any preemption, keep 2 CPUs free for other tasks, one
 * of which is the main test case function checking for
 * completion or failure.
 */ constlong min_unused_cpus = preempt_model_none() ? 2 : 0; constlong min_required_cpus = 2 + min_unused_cpus;
if (num_online_cpus() < min_required_cpus) {
pr_err_once("Too few online CPUs (%u < %ld) for test\n",
num_online_cpus(), min_required_cpus);
nthreads = 0;
} elseif (nthreads >= num_online_cpus() - min_unused_cpus) { /* Use negative value to indicate last param. */
nthreads = -(num_online_cpus() - min_unused_cpus);
pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
-nthreads, num_online_cpus());
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.