/* The trace of execution is something like this:
 *
 * finit_module()
 *   load_module()
 *     prepare_coming_module()
 *       notifier_call(MODULE_STATE_COMING)
 *         btf_parse_module()
 *           btf_alloc_id()	// Visible to userspace at this point
 *           list_add(btf_mod->list, &btf_modules)
 *     do_init_module()
 *       freeinit = kmalloc()
 *       ret = mod->init()
 *         bpf_prog_widen_race()
 *           bpf_copy_from_user()
 *             ...<sleep>...
 *       if (ret < 0)
 *         ...
 *         free_module()
 *       return ret
 *
 * At this point, the module loading thread is blocked; we now load the program:
 *
 * bpf_check
 *   add_kfunc_call/check_pseudo_btf_id
 *     btf_try_get_module
 *       try_get_module_live == false
 *     return -ENXIO
 *
 * Without the fix (try_get_module_live in btf_try_get_module):
 *
 * bpf_check
 *   add_kfunc_call/check_pseudo_btf_id
 *     btf_try_get_module
 *       try_get_module == true
 *     <store module reference in btf_kfunc_tab or used_btf array>
 *   ...
 * return fd
 *
 * Now, if we inject an error into the blocked program, our module will be freed
 * (going straight from MODULE_STATE_COMING to MODULE_STATE_GOING).
 * Later, when the bpf program is freed, it will try to module_put the already
 * freed module. This is why try_get_module_live returns false if mod->state is
 * not MODULE_STATE_LIVE.
 */
SEC("fmod_ret.s/bpf_fentry_test1")
int BPF_PROG(widen_race, int a, int ret)
{
	char byte;

	/* Only act on the thread under test. */
	if (!check_thread_id())
		return 0;

	/* Tell userspace we are about to block in a sleepable helper. */
	bpf_blocking = 1;
	/* Sleepable read of one byte from the configured faulting address;
	 * this is what widens the race window against module loading.
	 */
	bpf_copy_from_user(&byte, 1, bpf_mod_race_config.fault_addr);
	return bpf_mod_race_config.inject_error;
}
SEC("fexit/do_init_module")
int BPF_PROG(fexit_init_module, struct module *mod, int ret)
{
	/* Only act on the thread under test. */
	if (!check_thread_id())
		return 0;

	/* Tell userspace that the blocking phase is over. */
	bpf_blocking = 2;
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.