/* This test crafts a race between btf_try_get_module and do_init_module, and * checks whether btf_try_get_module handles the invocation for a well-formed * but uninitialized module correctly. Unless the module has completed its * initcalls, the verifier should fail the program load and return ENXIO. * * userfaultfd is used to trigger a fault in an fmod_ret program, and make it * sleep, then the BPF program is loaded and the return value from verifier is * inspected. After this, the userfaultfd is closed so that the module loading * thread makes forward progress, and fmod_ret injects an error so that the * module load fails and it is freed. * * If the verifier succeeded in loading the supplied program, it will end up * taking reference to freed module, and trigger a crash when the program fd * is closed later. This is true for both kfuncs and ksyms. In both cases, * the crash is triggered inside bpf_prog_free_deferred, when module reference * is finally released.
*/
/* Map one anonymous read-only page; the fmod_ret BPF program will fault on
 * this address via bpf_copy_from_user(), and userfaultfd (registered on it
 * below) turns that fault into a controllable blocking point.
 */
fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration")) return;
/* Unload bpf_testmod so the racing thread below can (re)load it while we
 * observe its MODULE_STATE_COMING window.
 */
if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod")) goto end_module;
skel = bpf_mod_race__open(); if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open")) goto end_module;
/* Configure the skeleton's read-only config before load: restrict the
 * fmod_ret program to this process, choose the error it injects into the
 * module init path (so the module load ultimately fails and the module is
 * freed), and tell it which address to fault on.
 */
skel->rodata->bpf_mod_race_config.tgid = getpid();
skel->rodata->bpf_mod_race_config.inject_error = -4242;
skel->rodata->bpf_mod_race_config.fault_addr = fault_addr; if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load")) goto end_destroy;
/* bpf_blocking lives in the skeleton's BSS and is written by the BPF
 * program; access it through C11 atomics since the module-load thread and
 * this thread race on it.
 */
blockingp = (_Atomic int *)&skel->bss->bpf_blocking;
if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach")) goto end_destroy;
/* Register fault_addr with userfaultfd so the in-kernel fault sleeps until
 * we service (or close) the uffd.
 */
uffd = test_setup_uffd(fault_addr); if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address")) goto end_destroy;
/* Kick off the module load in a separate thread; it should get stuck inside
 * the fmod_ret program once it touches fault_addr.
 */
if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL), "load module thread")) goto end_uffd;
/* Now, we either fail loading module, or block in bpf prog, spin to find out */ while (!atomic_load(&state) && !atomic_load(blockingp))
; if (!ASSERT_EQ(state, _TS_INVALID, "module load should block")) goto end_join; if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
	/* The BPF program never signalled it was blocking; reap the stuck
	 * loader thread before bailing out.
	 */
	pthread_kill(load_mod_thrd, SIGKILL); goto end_uffd;
}
/* We might have set bpf_blocking to 1, but may have not blocked in * bpf_copy_from_user. Read userfaultfd descriptor to verify that.
 */ if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg), "read uffd block event")) goto end_join; if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault")) goto end_join;
/* We know that load_mod_thrd is blocked in the fmod_ret program, the * module state is still MODULE_STATE_COMING because mod->init hasn't
 * returned. This is the time we try to load a program calling kfunc and
 * check if we get ENXIO from verifier.
 */
/* Attempt the racy program load; the verifier is expected to reject it
 * (skel_fail == NULL) because the module it references is uninitialized.
 */
skel_fail = config->bpf_open_and_load();
ret = errno; if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) { /* Close uffd to unblock load_mod_thrd */
	close(uffd);
	/* Mark uffd consumed, then wait for the BPF program to advance
	 * bpf_blocking to 2, signalling it left the faulting section.
	 */
	uffd = -1; while (atomic_load(blockingp) != 2)
	;
	/* Let any module-reference release via RCU complete before tearing
	 * down the (wrongly loaded) program.
	 */
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
	config->bpf_destroy(skel_fail); goto end_join;
/* NOTE(review): the following text is non-code residue (a German website
 * disclaimer) that appears to have been fused onto this file during
 * extraction; it is unrelated to the test and should be removed. English
 * translation for the record: "The information on this website has been
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information
 * is guaranteed. Remark: the colored syntax display and the measurement
 * are still experimental."
 */