/*
 * Force the static-call sites to be (re)patched on the next init pass.
 *
 * Must be called before early_initcall() to be effective: it bumps the
 * initialized counter so a subsequent init run treats the sites as stale.
 * Warns (once) and bails out if static calls were never initialized.
 */
void static_call_force_reinit(void)
{
	/* Nothing to re-init if the first init never happened. */
	if (!WARN_ON_ONCE(!static_call_initialized))
		static_call_initialized++;
}
/* mutex to protect key modules/sites */ static DEFINE_MUTEX(static_call_mutex);
/* * If uninitialized, we'll not update the callsites, but they still * point to the trampoline and we just patched that.
*/ if (WARN_ON_ONCE(!static_call_initialized)) goto done;
if (!site_mod->sites) { /* * This can happen if the static call key is defined in * a module which doesn't use it. * * It also happens in the has_mods case, where the * 'first' entry has no sites associated with it.
*/ continue;
}
for (site = site_mod->sites;
site < stop && static_call_key(site) == key; site++) { void *site_addr = static_call_addr(site);
if (!init && static_call_is_init(site)) continue;
if (!kernel_text_address((unsignedlong)site_addr)) { /* * This skips patching built-in __exit, which * is part of init_section_contains() but is * not part of kernel_text_address(). * * Skipping built-in __exit is fine since it * will never be executed.
*/
WARN_ONCE(!static_call_is_init(site), "can't patch static call site at %pS",
site_addr); continue;
}
/* * For vmlinux (!mod) avoid the allocation by storing * the sites pointer in the key itself. Also see * __static_call_update()'s @first. * * This allows architectures (eg. x86) to call * static_call_init() before memory allocation works.
*/ if (!mod) {
key->sites = site;
key->type |= 1; goto do_transform;
}
site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); if (!site_mod) return -ENOMEM;
/* * When the key has a direct sites pointer, extract * that into an explicit struct static_call_mod, so we * can have a list of modules.
*/ if (static_call_key_sites(key)) {
site_mod->mod = NULL;
site_mod->next = NULL;
site_mod->sites = static_call_key_sites(key);
key->mods = site_mod;
site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL); if (!site_mod) return -ENOMEM;
}
scoped_guard(rcu) {
mod = __module_text_address((unsignedlong)start);
WARN_ON_ONCE(__module_text_address((unsignedlong)end) != mod); if (!try_module_get(mod))
mod = NULL;
} if (!mod) return 0;
/* * If the key is exported, 'addr' points to the key, which * means modules are allowed to call static_call_update() on * it. * * Otherwise, the key isn't exported, and 'addr' points to the * trampoline so we need to lookup the key. * * We go through this dance to prevent crazy modules from * abusing sensitive static calls.
*/ if (!kernel_text_address(addr)) continue;
key = tramp_key_lookup(addr); if (!key) {
pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
static_call_addr(site)); return -EINVAL;
}
for (site = start; site < stop; site++) {
key = static_call_key(site);
/* * If the key was not updated due to a memory allocation * failure in __static_call_init() then treating key::sites * as key::mods in the code below would cause random memory * access and #GP. In that case all subsequent sites have * not been touched either, so stop iterating.
*/ if (!static_call_key_has_mods(key)) break;
staticint static_call_module_notify(struct notifier_block *nb, unsignedlong val, void *data)
{ struct module *mod = data; int ret = 0;
cpus_read_lock();
static_call_lock();
switch (val) { case MODULE_STATE_COMING:
ret = static_call_add_module(mod); if (ret) {
pr_warn("Failed to allocate memory for static calls\n");
static_call_del_module(mod);
} break; case MODULE_STATE_GOING:
static_call_del_module(mod); break;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.