/*
 * Mutex protects:
 * 1) List of modules (also safely readable within RCU read section),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);
/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);
/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
{
	unsigned long start = (unsigned long)base;
	unsigned long end = start + size;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	/* Core data lives in vmalloc space and is tracked with its own bounds. */
	if (mod_mem_type_is_core_data(type)) {
		if (start < tree->data_addr_min)
			tree->data_addr_min = start;
		if (end > tree->data_addr_max)
			tree->data_addr_max = end;
		return;
	}
#endif
	if (start < tree->addr_min)
		tree->addr_min = start;
	if (end > tree->addr_max)
		tree->addr_max = end;
}
/* Subscribe @nb to module state-change notifications. */
int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);
/* Remove @nb from the module state-change notification chain. */
int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	return try_module_get(mod) ? 0 : -ENOENT;
}
/*
 * Like strncmp(), except s/-/_/g as per scripts/Makefile.lib:name-fix-token rule.
 */
static int mod_strncmp(const char *str_a, const char *str_b, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		/* Treat '-' and '_' as equivalent, as kbuild does for names. */
		char ca = (str_a[i] == '-') ? '_' : str_a[i];
		char cb = (str_b[i] == '-') ? '_' : str_b[i];

		if (ca != cb)
			return ca - cb;
		if (!ca)
			break;	/* both strings ended before n chars */
	}
	return 0;
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);
/* Find a module section: 0 means not found. */ staticunsignedint find_sec(conststruct load_info *info, constchar *name)
{ unsignedint i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i]; /* Alloc bit cleared means "ignore it." */ if ((shdr->sh_flags & SHF_ALLOC)
&& strcmp(info->secstrings + shdr->sh_name, name) == 0) return i;
} return 0;
}
/** * find_any_unique_sec() - Find a unique section index by name * @info: Load info for the module to scan * @name: Name of the section we're looking for * * Locates a unique section by name. Ignores SHF_ALLOC. * * Return: Section index if found uniquely, zero if absent, negative count * of total instances if multiple were found.
*/ staticint find_any_unique_sec(conststruct load_info *info, constchar *name)
{ unsignedint idx; unsignedint count = 0; int i;
for (i = 1; i < info->hdr->e_shnum; i++) { if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
name) == 0) {
count++;
idx = i;
}
} if (count == 1) { return idx;
} elseif (count == 0) { return 0;
} else { return -count;
}
}
/* Find a module section, or NULL. */ staticvoid *section_addr(conststruct load_info *info, constchar *name)
{ /* Section 0 has sh_addr 0. */ return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info, const char *name,
			  size_t object_size, unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ staticunsignedint find_any_sec(conststruct load_info *info, constchar *name)
{ unsignedint i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i]; if (strcmp(info->secstrings + shdr->sh_name, name) == 0) return i;
} return 0;
}
/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/** * is_module_percpu_address() - test whether address is from module static percpu * @addr: address to test * * Test whether @addr belongs to module static percpu area. * * Return: %true if @addr is from module static percpu area
*/ bool is_module_percpu_address(unsignedlong addr)
{ return __is_module_percpu_address(addr, NULL);
}
#else/* ... !CONFIG_SMP */
staticinlinevoid __percpu *mod_percpu(struct module *mod)
{ return NULL;
} staticint percpu_modalloc(struct module *mod, struct load_info *info)
{ /* UP modules shouldn't have this section: ENOMEM isn't quite right */ if (info->sechdrs[info->index.pcpu].sh_size != 0) return -ENOMEM; return 0;
} staticinlinevoid percpu_modfree(struct module *mod)
{
} staticunsignedint find_pcpusec(struct load_info *info)
{ return 0;
} staticinlinevoid percpu_modcopy(struct module *mod, constvoid *from, unsignedlong size)
{ /* pcpusec should be 0, and size of that section should be 0. */
BUG_ON(size != 0);
} bool is_module_percpu_address(unsignedlong addr)
{ returnfalse;
}
/* Hold reference count during initialization. */
atomic_inc(&mod->refcnt);
return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	/* Walk b's source list looking for a. */
	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a)
			return 1;
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}
/* * Module a uses b * - we add 'a' as a "source", 'b' as a "target" of module use * - the module_use is added to the list of 'b' sources (so * 'b' can walk the list to see who sourced them), and of 'a' * targets (so 'a' can see what modules it targets).
*/ staticint add_module_usage(struct module *a, struct module *b)
{ struct module_use *use;
pr_debug("Allocating new usage for %s.\n", a->name);
use = kmalloc(sizeof(*use), GFP_ATOMIC); if (!use) return -ENOMEM;
#ifdef CONFIG_MODULE_FORCE_UNLOAD
/* O_TRUNC in @flags requests a forced unload; taint the kernel if so. */
static inline int try_force_unload(unsigned int flags)
{
	int force = flags & O_TRUNC;

	if (force)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return force;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}
/*
 * Drop the base reference and mark @mod dying; fails with -EWOULDBLOCK
 * if the module is still in use and unloading is not being forced.
 */
static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;
	return 0;
}
/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod: the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM;
len = strncpy_from_user(name, name_user, MODULE_NAME_LEN); if (len == 0 || len == MODULE_NAME_LEN) return -ENOENT; if (len < 0) return len;
audit_log_kern_module(name);
if (mutex_lock_interruptible(&module_mutex) != 0) return -EINTR;
mod = find_module(name); if (!mod) {
ret = -ENOENT; goto out;
}
if (!list_empty(&mod->source_list)) { /* Other modules depend on us: get rid of them first. */
ret = -EWOULDBLOCK; goto out;
}
/* Doing init or already dying? */ if (mod->state != MODULE_STATE_LIVE) { /* FIXME: if (force), slam module count damn the torpedoes */
pr_debug("%s already dying\n", mod->name);
ret = -EBUSY; goto out;
}
/* If it has an init func, it must have an exit func to unload */ if (mod->init && !mod->exit) {
forced = try_force_unload(flags); if (!forced) { /* This module can't be removed */
ret = -EBUSY; goto out;
}
}
ret = try_stop_module(mod, flags, &forced); if (ret != 0) goto out;
mutex_unlock(&module_mutex); /* Final destruction now no one is using it. */ if (mod->exit != NULL)
mod->exit();
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
klp_module_going(mod);
ftrace_release_mod(mod);
async_synchronize_full();
/* Store the name and taints of the last unloaded module for diagnostic purposes */
strscpy(last_unloaded_module.name, mod->name);
strscpy(last_unloaded_module.taints, module_flags(mod, buf, false));
free_module(mod); /* someone could wait for the module in add_unformed_module() */
wake_up_all(&module_wq); return 0;
out:
mutex_unlock(&module_mutex); return ret;
}
/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	/* Core kernel text holds no module reference to drop. */
	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * RCU read section in order to safely traverse the data structure.
	 */
	guard(rcu)();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
/*
 * Take a reference on @module. Succeeds trivially for a NULL module;
 * otherwise only while the module is live and its refcount is non-zero.
 */
bool try_module_get(struct module *module)
{
	if (!module)
		return true;

	/* Note: here, we can fail to get a reference */
	if (likely(module_is_live(module) &&
		   atomic_inc_not_zero(&module->refcnt) != 0)) {
		trace_module_get(module, _RET_IP_);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(try_module_get);
/* Drop a reference previously taken with try_module_get(). NULL is a no-op. */
void module_put(struct module *module)
{
	int ret;

	if (!module)
		return;

	ret = atomic_dec_if_positive(&module->refcnt);
	WARN_ON(ret < 0);	/* Failed to put refcount */
	trace_module_put(module, _RET_IP_);
}
EXPORT_SYMBOL(module_put);
switch (mk->mod->state) { case MODULE_STATE_LIVE:
state = "live"; break; case MODULE_STATE_COMING:
state = "coming"; break; case MODULE_STATE_GOING:
state = "going"; break; default:
BUG();
} return sprintf(buffer, "%s\n", state);
}
/* * get_modinfo() calls made before rewrite_section_headers() * must use sh_offset, as sh_addr isn't set!
*/ char *modinfo = (char *)info->hdr + infosec->sh_offset;
/** * verify_module_namespace() - does @modname have access to this symbol's @namespace * @namespace: export symbol namespace * @modname: module name * * If @namespace is prefixed with "module:" to indicate it is a module namespace * then test if @modname matches any of the comma separated patterns. * * The patterns only support tail-glob.
*/ staticbool verify_module_namespace(constchar *namespace, constchar *modname)
{
size_t len, modlen = strlen(modname); constchar *prefix = "module:"; constchar *sep; bool glob;
if (!strstarts(namespace, prefix)) returnfalse;
for (namespace += strlen(prefix); *namespace; namespace = sep) {
sep = strchrnul(namespace, ',');
len = sep - namespace;
/* Resolve a symbol for this module. I.e. if we find one, record usage. */ staticconststruct kernel_symbol *resolve_symbol(struct module *mod, conststruct load_info *info, constchar *name, char ownername[])
{ struct find_symbol_arg fsa = {
.name = name,
.gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
.warn = true,
}; int err;
/* * The module_mutex should not be a heavily contended lock; * if we get the occasional sleep here, we'll go an extra iteration * in the wait_event_interruptible(), which is harmless.
*/
sched_annotate_sleep();
mutex_lock(&module_mutex); if (!find_symbol(&fsa)) goto unlock;
if (fsa.license == GPL_ONLY)
mod->using_gplonly_symbols = true;
getname: /* We must make copy under the lock if we failed to get ref. */
strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
mutex_unlock(&module_mutex); return fsa.sym;
}
/* * The pointer to these blocks of memory are stored on the module * structure and we keep that around so long as the module is * around. We only free that memory when we unload the module. * Just mark them as not being a leak then. The .init* ELF * sections *do* get freed after boot so we *could* treat them * slightly differently with kmemleak_ignore() and only grey * them out as they work as typical memory allocations which * *do* eventually get freed, but let's just keep things simple * and avoid *any* false positives.
*/ if (!mod->mem[type].is_rox)
kmemleak_not_leak(ptr);
/* Free lock-classes; relies on the preceding sync_rcu(). */
lockdep_free_key_range(mod_mem->base, mod_mem->size); if (mod_mem->size)
module_memory_free(mod, type);
}
/* MOD_DATA hosts mod, so free it at last */
lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
module_memory_free(mod, MOD_DATA);
}
/* Free a module, remove from lists, etc. */ staticvoid free_module(struct module *mod)
{
trace_module_free(mod);
codetag_unload_module(mod);
mod_sysfs_teardown(mod);
/* * We leave it in list to prevent duplicate loads, but make sure * that noone uses it while it's being deconstructed.
*/
mutex_lock(&module_mutex);
mod->state = MODULE_STATE_UNFORMED;
mutex_unlock(&module_mutex);
/* Free any allocated parameters. */
destroy_params(mod->kp, mod->num_kp);
if (is_livepatch_module(mod))
free_module_elf(mod);
/* Now we can delete it from the lists */
mutex_lock(&module_mutex); /* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
mod_tree_remove(mod); /* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod); /* Wait for RCU synchronizing before releasing mod->list and buglist. */
synchronize_rcu(); if (try_add_tainted_module(mod))
pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
mod->name);
mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */
module_arch_freeing_init(mod);
kfree(mod->args);
percpu_modfree(mod);
scoped_guard(rcu) { if (!find_symbol(&fsa)) return NULL; if (fsa.license != GPL_ONLY) {
pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
symbol); return NULL;
} if (strong_try_module_get(fsa.owner)) return NULL;
} return (void *)kernel_symbol_value(fsa.sym);
}
EXPORT_SYMBOL_GPL(__symbol_get);
/* * Ensure that an exported symbol [global namespace] does not already exist * in the kernel or in some other module's exported symbol table. * * You must hold the module_mutex.
*/ staticint verify_exported_symbols(struct module *mod)
{ unsignedint i; conststruct kernel_symbol *s; struct { conststruct kernel_symbol *sym; unsignedint num;
} arr[] = {
{ mod->syms, mod->num_syms },
{ mod->gpl_syms, mod->num_gpl_syms },
};
for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { struct find_symbol_arg fsa = {
.name = kernel_symbol_name(s),
.gplok = true,
}; if (find_symbol(&fsa)) {
pr_err("%s: exports duplicate symbol %s" " (owned by %s)\n",
mod->name, kernel_symbol_name(s),
module_name(fsa.owner)); return -ENOEXEC;
}
}
} return 0;
}
staticbool ignore_undef_symbol(Elf_Half emachine, constchar *name)
{ /* * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. * i386 has a similar problem but may not deserve a fix. * * If we ever have to ignore many symbols, consider refactoring the code to * only warn if referenced by a relocation.
*/ if (emachine == EM_386 || emachine == EM_X86_64) return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); returnfalse;
}
/* Change all symbols so that st_value encodes the pointer directly. */ staticint simplify_symbols(struct module *mod, conststruct load_info *info)
{
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
Elf_Sym *sym = (void *)symsec->sh_addr; unsignedlong secbase; unsignedint i; int ret = 0; conststruct kernel_symbol *ksym;
for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { constchar *name = info->strtab + sym[i].st_name;
switch (sym[i].st_shndx) { case SHN_COMMON: /* Ignore common symbols */ if (!strncmp(name, "__gnu_lto", 9)) break;
/* * We compiled with -fno-common. These are not * supposed to happen.
*/
pr_debug("Common symbol: %s\n", name);
pr_warn("%s: please compile with -fno-common\n",
mod->name);
ret = -ENOEXEC; break;
case SHN_ABS: /* Don't need to do anything */
pr_debug("Absolute symbol: 0x%08lx %s\n",
(long)sym[i].st_value, name); break;
case SHN_LIVEPATCH: /* Livepatch symbols are resolved by livepatch */ break;
case SHN_UNDEF:
ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. */ if (ksym && !IS_ERR(ksym)) {
sym[i].st_value = kernel_symbol_value(ksym); break;
}
/* Ok if weak or ignored. */ if (!ksym &&
(ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
ignore_undef_symbol(info->hdr->e_machine, name))) break;
ret = PTR_ERR(ksym) ?: -ENOENT;
pr_warn("%s: Unknown symbol %s (err %d)\n",
mod->name, name, ret); break;
default: /* Divert to percpu allocation if a percpu var. */ if (sym[i].st_shndx == info->index.pcpu)
secbase = (unsignedlong)mod_percpu(mod); else
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
sym[i].st_value += secbase; break;
}
}
/* Now do relocations. */ for (i = 1; i < info->hdr->e_shnum; i++) { unsignedint infosec = info->sechdrs[i].sh_info;
/* Not a valid relocation section? */ if (infosec >= info->hdr->e_shnum) continue;
/* * Don't bother with non-allocated sections. * An exception is the percpu section, which has separate allocations * for individual CPUs. We relocate the percpu section in the initial * ELF template and subsequently copy it to the per-CPU destinations.
*/ if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC) &&
(!infosec || infosec != info->index.pcpu)) continue;
if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
err = klp_apply_section_relocs(mod, info->sechdrs,
info->secstrings,
info->strtab,
info->index.sym, i,
NULL); elseif (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod); elseif (info->sechdrs[i].sh_type == SHT_RELA)
err = apply_relocate_add(info->sechdrs, info->strtab,
info->index.sym, i, mod); if (err < 0) break;
} return err;
}
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}
long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
Elf_Shdr *sechdr, unsignedint section)
{ long offset; long mask = ((unsignedlong)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;
/* * Do not allocate codetag memory as we load it into * preallocated contiguous memory.
*/ if (codetag_needs_module_section(mod, sname, s->sh_size)) { /* * s->sh_entsize won't be used but populate the * type field to avoid confusion.
*/
s->sh_entsize = ((unsignedlong)(type) & SH_ENTSIZE_TYPE_MASK)
<< SH_ENTSIZE_TYPE_SHIFT; continue;
}
/*
 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 * might -- code, read-only data, read-write data, small data. Tally
 * sizes, and place the offsets into sh_entsize fields: high bit means it
 * belongs in init.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{
	unsigned int i;

	/* Mark every section as not yet placed. */
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, false);
	pr_debug("Init section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, true);
}
/* * Check for both overflow and offset/size being * too large.
*/
secend = shdr->sh_offset + shdr->sh_size; if (secend < shdr->sh_offset || secend > info->len) return -ENOEXEC;
return 0;
}
/** * elf_validity_ehdr() - Checks an ELF header for module validity * @info: Load info containing the ELF header to check * * Checks whether an ELF header could belong to a valid module. Checks: * * * ELF header is within the data the user provided * * ELF magic is present * * It is relocatable (not final linked, not core file, etc.) * * The header's machine type matches what the architecture expects. * * Optional arch-specific hook for other properties * - module_elf_check_arch() is currently only used by PPC to check * ELF ABI version, but may be used by others in the future. * * Return: %0 if valid, %-ENOEXEC on failure.
*/ staticint elf_validity_ehdr(conststruct load_info *info)
{ if (info->len < sizeof(*(info->hdr))) {
pr_err("Invalid ELF header len %lu\n", info->len); return -ENOEXEC;
} if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
pr_err("Invalid ELF header magic: != %s\n", ELFMAG); return -ENOEXEC;
} if (info->hdr->e_type != ET_REL) {
pr_err("Invalid ELF header type: %u != %u\n",
info->hdr->e_type, ET_REL); return -ENOEXEC;
} if (!elf_check_arch(info->hdr)) {
pr_err("Invalid architecture in ELF header: %u\n",
info->hdr->e_machine); return -ENOEXEC;
} if (!module_elf_check_arch(info->hdr)) {
pr_err("Invalid module architecture in ELF header: %u\n",
info->hdr->e_machine); return -ENOEXEC;
} return 0;
}
/**
 * elf_validity_cache_sechdrs() - Cache section headers if valid
 * @info: Load info to compute section headers from
 *
 * Checks:
 *
 * * ELF header is valid (see elf_validity_ehdr())
 * * Section headers are the size we expect
 * * Section array fits in the user provided data
 * * Section index 0 is NULL
 * * Section contents are inbounds
 *
 * Then updates @info with a &load_info->sechdrs pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_sechdrs(struct load_info *info)
{
	Elf_Shdr *sechdrs;
	Elf_Shdr *shdr;
	int i;
	int err;

	err = elf_validity_ehdr(info);
	if (err < 0)
		return err;

	/*
	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
	 * known and small. So e_shnum * sizeof(Elf_Shdr)
	 * will not overflow unsigned long on any platform.
	 */
	if (info->hdr->e_shoff >= info->len ||
	    (info->hdr->e_shnum * sizeof(Elf_Shdr) >
	     info->len - info->hdr->e_shoff)) {
		pr_err("Invalid ELF section header overflow\n");
		return -ENOEXEC;
	}

	sechdrs = (void *)info->hdr + info->hdr->e_shoff;

	/*
	 * The code assumes that section 0 has a length of zero and
	 * an addr of zero, so check for it.
	 */
	if (sechdrs[0].sh_type != SHT_NULL ||
	    sechdrs[0].sh_size != 0 ||
	    sechdrs[0].sh_addr != 0) {
		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
		       sechdrs[0].sh_type);
		return -ENOEXEC;
	}

	/* Validate contents are inbounds */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		shdr = &sechdrs[i];
		switch (shdr->sh_type) {
		case SHT_NULL:
		case SHT_NOBITS:
			/* No contents, offset/size don't mean anything */
			continue;
		default:
			err = validate_section_offset(info, shdr);
			if (err < 0) {
				pr_err("Invalid ELF section in module (section %u type %u)\n",
				       i, shdr->sh_type);
				return err;
			}
		}
	}

	info->sechdrs = sechdrs;
	return 0;
}
/**
 * elf_validity_cache_secstrings() - Caches section names if valid
 * @info: Load info to cache section names from. Must have valid sechdrs.
 *
 * Specifically checks:
 *
 * * Section name table index is inbounds of section headers
 * * Section name table is not empty
 * * Section name table is NUL terminated
 * * All section name offsets are inbounds of the section
 *
 * Then updates @info with a &load_info->secstrings pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_secstrings(struct load_info *info)
{
	Elf_Shdr *strhdr, *shdr;
	char *secstrings;
	int i;

	/*
	 * Verify if the section name table index is valid.
	 */
	if (info->hdr->e_shstrndx == SHN_UNDEF ||
	    info->hdr->e_shstrndx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
		       info->hdr->e_shnum);
		return -ENOEXEC;
	}

	strhdr = &info->sechdrs[info->hdr->e_shstrndx];

	/*
	 * The section name table must be NUL-terminated, as required
	 * by the spec. This makes strcmp and pr_* calls that access
	 * strings in the section safe.
	 */
	secstrings = (void *)info->hdr + strhdr->sh_offset;
	if (strhdr->sh_size == 0) {
		pr_err("empty section name table\n");
		return -ENOEXEC;
	}
	if (secstrings[strhdr->sh_size - 1] != '\0') {
		pr_err("ELF Spec violation: section name table isn't null terminated\n");
		return -ENOEXEC;
	}

	/* Every section's name offset must land inside the table. */
	for (i = 0; i < info->hdr->e_shnum; i++) {
		shdr = &info->sechdrs[i];
		/* SHT_NULL means sh_name has an undefined value */
		if (shdr->sh_type == SHT_NULL)
			continue;
		if (shdr->sh_name >= strhdr->sh_size) {
			pr_err("Invalid ELF section name in module (section %u type %u)\n",
			       i, shdr->sh_type);
			return -ENOEXEC;
		}
	}

	info->secstrings = secstrings;
	return 0;
}
/**
 * elf_validity_cache_index_info() - Validate and cache modinfo section
 * @info: Load info to populate the modinfo index on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * Checks that if there is a .modinfo section, it is unique.
 * Then, it caches its index in &load_info->index.info.
 * Finally, it tries to populate the name to improve error messages.
 *
 * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
 */
static int elf_validity_cache_index_info(struct load_info *info)
{
	int info_idx = find_any_unique_sec(info, ".modinfo");

	/* Early return, no .modinfo */
	if (info_idx == 0)
		return 0;

	if (info_idx < 0) {
		pr_err("Only one .modinfo section must exist.\n");
		return -ENOEXEC;
	}

	info->index.info = info_idx;
	/* Try to find a name early so we can log errors with a module name */
	info->name = get_modinfo(info, "name");

	return 0;
}
/**
 * elf_validity_cache_index_mod() - Validates and caches this_module section
 * @info: Load info to cache this_module on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost
 * uses to refer to __this_module and let's use rely on THIS_MODULE to point
 * to &__this_module properly. The kernel's modpost declares it on each
 * modules's *.mod.c file. If the struct module of the kernel changes a full
 * kernel rebuild is required.
 *
 * We have a few expectations for this special section, this function
 * validates all this for us:
 *
 *   * The section has contents
 *   * The section is unique
 *   * We expect the kernel to always have to allocate it: SHF_ALLOC
 *   * The section size must match the kernel's run time's struct module
 *     size
 *
 * If all checks pass, the index will be cached in &load_info->index.mod
 *
 * Return: %0 on validation success, %-ENOEXEC on failure
 */
static int elf_validity_cache_index_mod(struct load_info *info)
{
	/* Fallback for error messages before the name is known. */
	const char *mod_name = info->name ?: "(missing .modinfo section or name field)";
	Elf_Shdr *shdr;
	int mod_idx;

	mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
	if (mod_idx <= 0) {
		pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
		       mod_name);
		return -ENOEXEC;
	}

	shdr = &info->sechdrs[mod_idx];

	if (shdr->sh_type == SHT_NOBITS) {
		pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
		       mod_name);
		return -ENOEXEC;
	}

	if (!(shdr->sh_flags & SHF_ALLOC)) {
		pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
		       mod_name);
		return -ENOEXEC;
	}

	if (shdr->sh_size != sizeof(struct module)) {
		pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
		       mod_name);
		return -ENOEXEC;
	}

	info->index.mod = mod_idx;
	return 0;
}
/**
 * elf_validity_cache_index_sym() - Validate and cache symtab index
 * @info: Load info to cache symtab index in.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated.
 *
 * Checks that there is exactly one symbol table, then caches its index in
 * &load_info->index.sym.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_sym(struct load_info *info)
{
	unsigned int sym_idx = 0;
	unsigned int num_sym_secs = 0;
	int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			num_sym_secs++;
			sym_idx = i;
		}
	}

	/* Exactly one SHT_SYMTAB is required. */
	if (num_sym_secs != 1) {
		pr_warn("%s: module has no symbols (stripped?)\n",
			info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	info->index.sym = sym_idx;
	return 0;
}
/** * elf_validity_cache_index_str() - Validate and cache strtab index * @info: Load info to cache strtab index in. * Must have &load_info->sechdrs and &load_info->secstrings populated. * Must have &load_info->index.sym populated. * * Looks at the symbol table's associated string table, makes sure it is * in-bounds, and caches it. * * Return: %0 if valid, %-ENOEXEC on failure.
*/ staticint elf_validity_cache_index_str(struct load_info *info)
{ unsignedint str_idx = info->sechdrs[info->index.sym].sh_link;
/** * elf_validity_cache_index_versions() - Validate and cache version indices * @info: Load info to cache version indices in. * Must have &load_info->sechdrs and &load_info->secstrings populated. * @flags: Load flags, relevant to suppress version loading, see * uapi/linux/module.h * * If we're ignoring modversions based on @flags, zero all version indices * and return validity. Othewrise check: * * * If "__version_ext_crcs" is present, "__version_ext_names" is present * * There is a name present for every crc * * Then populate: * * * &load_info->index.vers * * &load_info->index.vers_ext_crc * * &load_info->index.vers_ext_names * * if present. * * Return: %0 if valid, %-ENOEXEC on failure.
*/ staticint elf_validity_cache_index_versions(struct load_info *info, int flags)
{ unsignedint vers_ext_crc; unsignedint vers_ext_name;
size_t crc_count;
size_t remaining_len;
size_t name_size; char *name;
/* If modversions were suppressed, pretend we didn't find any */ if (flags & MODULE_INIT_IGNORE_MODVERSIONS) {
info->index.vers = 0;
info->index.vers_ext_crc = 0;
info->index.vers_ext_name = 0; return 0;
}
/* If we have one field, we must have the other */ if (!!vers_ext_crc != !!vers_ext_name) {
pr_err("extended version crc+name presence does not match"); return -ENOEXEC;
}
/* * If we have extended version information, we should have the same * number of entries in every section.
*/ if (vers_ext_crc) {
crc_count = info->sechdrs[vers_ext_crc].sh_size / sizeof(u32);
name = (void *)info->hdr +
info->sechdrs[vers_ext_name].sh_offset;
remaining_len = info->sechdrs[vers_ext_name].sh_size;
while (crc_count--) {
name_size = strnlen(name, remaining_len) + 1; if (name_size > remaining_len) {
pr_err("more extended version crcs than names"); return -ENOEXEC;
}
remaining_len -= name_size;
name += name_size;
}
}
/**
 * elf_validity_cache_index() - Resolve, validate, cache section indices
 * @info:  Load info to read from and update.
 *         &load_info->sechdrs and &load_info->secstrings must be populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *         uapi/linux/module.h
 *
 * Populates &load_info->index, validating as it goes.
 * See child functions for per-field validation:
 *
 * * elf_validity_cache_index_info()
 * * elf_validity_cache_index_mod()
 * * elf_validity_cache_index_sym()
 * * elf_validity_cache_index_str()
 * * elf_validity_cache_index_versions()
 *
 * If CONFIG_SMP is enabled, load the percpu section by name with no
 * validation.
 *
 * Return: 0 on success, negative error code if an index failed validation.
 */
static int elf_validity_cache_index(struct load_info *info, int flags)
{
	int err;

	err = elf_validity_cache_index_info(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_mod(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_sym(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_str(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_versions(info, flags);
	if (err < 0)
		return err;

	/* Percpu section index is cached without further validation. */
	info->index.pcpu = find_pcpusec(info);

	return 0;
}
/**
 * elf_validity_cache_strtab() - Validate and cache symbol string table
 * @info: Load info to read from and update.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated.
 *        Must have &load_info->index populated.
 *
 * Checks:
 *
 * * The string table is not empty.
 * * The string table starts and ends with NUL (required by ELF spec).
 * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the
 *   string table.
 *
 * And caches the pointer as &load_info->strtab in @info.
 *
 * Return: 0 on success, negative error code if a check failed.
 */
static int elf_validity_cache_strtab(struct load_info *info)
{
	Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
	Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
	char *strtab = (char *)info->hdr + str_shdr->sh_offset;
	Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;
	int i;

	if (str_shdr->sh_size == 0) {
		pr_err("empty symbol string table\n");
		return -ENOEXEC;
	}
	if (strtab[0] != '\0') {
		pr_err("symbol string table missing leading NUL\n");
		return -ENOEXEC;
	}
	if (strtab[str_shdr->sh_size - 1] != '\0') {
		pr_err("symbol string table isn't NUL terminated\n");
		return -ENOEXEC;
	}

	/*
	 * Now that we know strtab is correctly structured, check symbol
	 * starts are inbounds before they're used later.
	 */
	for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
		if (syms[i].st_name >= str_shdr->sh_size) {
			pr_err("symbol name out of bounds in string table");
			return -ENOEXEC;
		}
	}

	info->strtab = strtab;
	return 0;
}
/*
 * Check userspace passed ELF module against our expectations, and cache
 * useful variables for further processing as we go.
 *
 * This does basic validity checks against section offsets and sizes, the
 * section name string table, and the indices used for it (sh_name).
 *
 * As a last step, since we're already checking the ELF sections we cache
 * useful variables which will be used later for our convenience:
 *
 *	o pointers to section headers
 *	o cache the modinfo symbol section
 *	o cache the string symbol section
 *	o cache the module section
 *
 * As a last step we set info->mod to the temporary copy of the module in
 * info->hdr. The final one will be allocated in move_module(). Any
 * modifications we make to our copy of the module will be carried over
 * to the final minted module.
 */
static int elf_validity_cache_copy(struct load_info *info, int flags)
{
	int err;

	err = elf_validity_cache_sechdrs(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_secstrings(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index(info, flags);
	if (err < 0)
		return err;
	err = elf_validity_cache_strtab(info);
	if (err < 0)
		return err;

	/* This is temporary: point mod into copy of data. */
	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;

	/*
	 * If we didn't load the .modinfo 'name' field earlier, fall back to
	 * on-disk struct mod 'name' field.
	 */
	if (!info->name)
		info->name = info->mod->name;

	return 0;
}
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
staticint copy_chunked_from_user(void *dst, constvoid __user *usrc, unsignedlong len)
{ do { unsignedlong n = min(len, COPY_CHUNK_SIZE);
if (copy_from_user(dst, usrc, n) != 0) return -EFAULT;
cond_resched();
dst += n;
usrc += n;
len -= n;
} while (len); return 0;
}
/*
 * Reject a module tagged "livepatch" in its modinfo when the kernel was
 * built without livepatch support; accept it (and mark it) otherwise.
 *
 * Return: 0 on success, -ENOEXEC when livepatch support is disabled.
 */
static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
{
	/* Nothing more to do for regular (non-livepatch) modules. */
	if (!get_modinfo(info, "livepatch"))
		return 0;

	if (set_livepatch_module(mod))
		return 0;

	/* Fix: the log message was missing its trailing newline. */
	pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
	       mod->name);
	return -ENOEXEC;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.