// SPDX-License-Identifier: GPL-2.0

/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, that output is
 * what we verify: each test case checks for the presence (or absence) of
 * generated reports. Relies on the 'console' tracepoint to capture reports as
 * they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */
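
/*
 * Capture buffer filled by the console probe below. A minimal sketch inferred
 * from the fields the probe references (lock, nlines, lines[]); the exact
 * line count and length are assumptions, not necessarily the upstream layout.
 */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;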
	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is a KFENCE report, and it is related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
}
WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
spin_unlock_irqrestore(&observed.lock, flags);
}
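
/*
 * The probe above is meant to be attached to the 'console' tracepoint; a
 * sketch of the registration, assumed to live in the suite's init/exit
 * hooks:
 *
 *	register_trace_console(probe_console, NULL);
 *	...
 *	unregister_trace_console(probe_console, NULL);
 */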
/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}
/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn;		     /* Function pointer to expected function where access occurred. */
	char *addr;		     /* Address at which the bad access occurred. */
	bool is_write;		     /* Is access a write. */
};
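
/*
 * Maps @r->is_write to the access-type string that appears in KFENCE
 * reports; report_matches() below relies on it. A minimal sketch, assuming
 * the strings are exactly "write"/"read" as matched in the expected output.
 */
static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}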
/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;
/* Generate expected report contents. */
/* Title */
cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';
/* Access information */
cur = expect[1];
end = &expect[1][sizeof(expect[1]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}
cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */
/* Finally match expected output to what we actually observed. */
ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
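
/*
 * For reference, an illustrative (not verbatim) pair of console lines that
 * the matching above would accept for an out-of-bounds read:
 *
 *	BUG: KFENCE: out-of-bounds read in test_kmalloc_aligned_oob_read+0x.../0x...
 *	Out-of-bounds read at 0x... (... of kfence-#...)
 */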
/* ===== Test cases ===== */
#define TEST_PRIV_WANT_MEMCACHE ((void *)1)
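
/*
 * In the full file, setup_test_cache() only creates a cache when test_init()
 * has set test->priv to TEST_PRIV_WANT_MEMCACHE (see the strstr() check on
 * the test name in test_init() at the bottom of this file).
 */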
/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;
	/*
	 * Use SLAB_NO_MERGE to prevent merging with existing caches.
	 * Use SLAB_ACCOUNT to allocate via memcg, if enabled.
	 */
flags |= SLAB_NO_MERGE | SLAB_ACCOUNT;
test_cache = kmem_cache_create("test", size, 1, flags, ctor);
KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");
return size;
}
static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}
static inline size_t kmalloc_cache_alignment(size_t size)
{
	/* Just to get ->align, so no need to pass in the real caller. */
	enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);

	return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}
/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
kfree(ptr);
}
/*
 * If this should be a KFENCE allocation, and on which side the allocation and
 * the closest guard page should be.
 */
enum allocation_policy {
ALLOCATE_ANY, /* KFENCE, any side. */
ALLOCATE_LEFT, /* KFENCE, left side of page. */
ALLOCATE_RIGHT, /* KFENCE, right side of page. */
ALLOCATE_NONE, /* No KFENCE allocation. */
};
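
/*
 * Layout sketch (one object per KFENCE page, guard pages on both sides):
 *
 *	ALLOCATE_LEFT:  | guard | [object......pad] | guard |  (page-aligned start)
 *	ALLOCATE_RIGHT: | guard | [pad......object] | guard |  (ends at page end)
 */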
/* * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the * current test_cache if set up.
*/ staticvoid *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{ void *alloc; unsignedlong timeout, resched_after; constchar *policy_name;
switch (policy) { case ALLOCATE_ANY:
policy_name = "any"; break; case ALLOCATE_LEFT:
policy_name = "left"; break; case ALLOCATE_RIGHT:
policy_name = "right"; break; case ALLOCATE_NONE:
policy_name = "none"; break;
}
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);
		if (is_kfence_address(alloc)) {
			struct slab *slab = virt_to_slab(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL, 0)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && PAGE_ALIGNED(alloc))
				return alloc;
			if (policy == ALLOCATE_RIGHT && !PAGE_ALIGNED(alloc))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;
test_free(alloc);
if (time_after(jiffies, resched_after))
cond_resched();
} while (time_before(jiffies, timeout));
	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}
	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
size = kmalloc_cache_alignment(size);
/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB and SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
.type = KFENCE_ERROR_OOB,
.fn = test_kmalloc_aligned_oob_read,
.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
KUNIT_EXPECT_FALSE(test, report_available());
	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
READ_ONCE(*(buf + size));
KUNIT_EXPECT_FALSE(test, report_available());
/* Overflowing by @align bytes will result in an OOB. */
expect.addr = buf + size + align;
READ_ONCE(*expect.addr);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
expect.addr = buf + size;
WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
KUNIT_EXPECT_FALSE(test, report_available());
test_free(buf);
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;
	/* Assume it hasn't been disabled on command line. */
	KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON));
setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
expect.addr[i] = i + 1;
test_free(expect.addr);
	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near-impossible with a
		 * default config.
		 */
KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);
if (!i) /* Only check first access to not fail test if page is ever re-protected. */
KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
}
/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	/* ctor_set_x() (elsewhere in this file) memsets the first 8 bytes to 'x'. */
	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
for (i = 0; i < 8; i++)
KUNIT_EXPECT_EQ(test, buf[i], (char)'x');
test_free(buf);
KUNIT_EXPECT_FALSE(test, report_available());
}
/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;
/* Skip if we think it'd take too long. */
KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
buf1[i] = i + 1;
test_free(buf1);
	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
test_free(buf2);
if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
}
cond_resched();
}
for (i = 0; i < size; i++)
KUNIT_EXPECT_EQ(test, buf2[i], (char)0);
rcu_read_lock();
test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
rcu_read_unlock();
/* Above access to @expect.addr should not have generated a report! */
KUNIT_EXPECT_FALSE(test, report_available());
/* Only after rcu_barrier() is the memory guaranteed to be freed. */
rcu_barrier();
KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
buf[i] = i + 1;
/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
buf[i] = i + 1;
buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
/* Test that some objects from a bulk allocation belong to KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;
setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
cond_resched();
} while (!pass && time_before(jiffies, timeout));
/*
 * KUnit does not provide a way to provide arguments to tests, and we encode
 * additional info in the name. Set up 2 tests per test case, one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
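
/*
 * For example, KFENCE_KUNIT_CASE(test_kmalloc_aligned_oob_read) registers
 * two cases named "test_kmalloc_aligned_oob_read" and
 * "test_kmalloc_aligned_oob_read-memcache"; test_init() below keys off the
 * "memcache" substring in the name to request a test cache.
 */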
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;
	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
observed.lines[i][0] = '\0';
observed.nlines = 0;
spin_unlock_irqrestore(&observed.lock, flags);
	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
test->priv = NULL;
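
	return 0;
}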