/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 * - __klp_sched_try_switch()
 */
DEFINE_MUTEX(klp_mutex);

/*
 * Actively used patches: enabled or in transition. Note that replaced
 * or disabled patches are not listed even though the related kernel
 * module still can be loaded.
 */
LIST_HEAD(klp_patches);
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	/* Hold RCU for the find_module() lookup; see comment below on refs. */
	guard(rcu)();
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess work of klp_module_coming() and klp_module_going().
	 * Note that the patch might still be needed before klp_module_going()
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify semantic of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;
}
/* * Finish the search when the symbol is found for the desired position * or the position is not defined for a non-unique symbol.
*/ if ((args->pos && (args->count == args->pos)) ||
(!args->pos && (args->count > 1))) return 1;
if (objname)
module_kallsyms_on_each_symbol(objname, klp_find_callback, &args); else
kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
/* * Ensure an address was found. If sympos is 0, ensure symbol is unique; * otherwise ensure the symbol position count matches sympos.
*/ if (args.addr == 0)
pr_err("symbol '%s' not found in symbol table\n", name); elseif (args.count > 1 && sympos == 0) {
pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
name, objname);
} elseif (sympos != args.count && sympos > 0) {
pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
sympos, name, objname ? objname : "vmlinux");
} else {
*addr = args.addr; return 0;
}
/* * Since the field widths for sym_objname and sym_name in the sscanf() * call are hard-coded and correspond to MODULE_NAME_LEN and * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN * and KSYM_NAME_LEN have the values we expect them to have. * * Because the value of MODULE_NAME_LEN can differ among architectures, * we use the smallest/strictest upper bound possible (56, based on * the current definition of MODULE_NAME_LEN) to prevent overflows.
*/
BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
relas = (Elf_Rela *) relasec->sh_addr; /* For each rela in this klp relocation section */ for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); if (sym->st_shndx != SHN_LIVEPATCH) {
pr_err("symbol %s is not marked as a livepatch symbol\n",
strtab + sym->st_name); return -EINVAL;
}
/* * Prevent module-specific KLP rela sections from referencing * vmlinux symbols. This helps prevent ordering issues with * module special section initializations. Presumably such * symbols are exported and normal relas can be used instead.
*/ if (!sec_vmlinux && sym_vmlinux) {
pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section\n",
sym_name); return -EINVAL;
}
/* klp_find_object_symbol() treats a NULL objname as vmlinux */
ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
sym_name, sympos, &addr); if (ret) return ret;
/* * At a high-level, there are two types of klp relocation sections: those which * reference symbols which live in vmlinux; and those which reference symbols * which live in other modules. This function is called for both types: * * 1) When a klp module itself loads, the module code calls this function to * write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections). * These relocations are written to the klp module text to allow the patched * code/data to reference unexported vmlinux symbols. They're written as * early as possible to ensure that other module init code (.e.g., * jump_label_apply_nops) can access any unexported vmlinux symbols which * might be referenced by the klp module's special sections. * * 2) When a to-be-patched module loads -- or is already loaded when a * corresponding klp module loads -- klp code calls this function to write * module-specific klp relocations (.klp.rela.{module}.* sections). These * are written to the klp module text to allow the patched code/data to * reference symbols which live in the to-be-patched module or one of its * module dependencies. Exported symbols are supported, in addition to * unexported symbols, in order to enable late module patching, which allows * the to-be-patched module to be loaded and patched sometime *after* the * klp module is loaded.
*/ staticint klp_write_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, constchar *shstrtab, constchar *strtab, unsignedint symndx, unsignedint secndx, constchar *objname, bool apply)
{ int cnt, ret; char sec_objname[MODULE_NAME_LEN];
Elf_Shdr *sec = sechdrs + secndx;
/* * Format: .klp.rela.sec_objname.section_name * See comment in klp_resolve_symbols() for an explanation * of the selected field width value.
*/
cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
sec_objname); if (cnt != 1) {
pr_err("section %s has an incorrectly formatted name\n",
shstrtab + sec->sh_name); return -EINVAL;
}
if (strcmp(objname ? objname : "vmlinux", sec_objname)) return 0;
if (apply) {
ret = klp_resolve_symbols(sechdrs, strtab, symndx,
sec, sec_objname); if (ret) return ret;
if (patch->enabled == enabled) { /* already in requested state */
ret = -EINVAL; goto out;
}
/* * Allow to reverse a pending transition in both ways. It might be * necessary to complete the transition without forcing and breaking * the system integrity. * * Do not allow to re-enable a disabled patch.
*/ if (patch == klp_transition_patch)
klp_reverse_transition(); elseif (!enabled)
ret = __klp_disable_patch(patch); else
ret = -EINVAL;
func = kzalloc(sizeof(*func), GFP_KERNEL); if (!func) return NULL;
if (old_func->old_name) {
func->old_name = kstrdup(old_func->old_name, GFP_KERNEL); if (!func->old_name) {
kfree(func); return NULL;
}
}
klp_init_func_early(obj, func); /* * func->new_func is same as func->old_func. These addresses are * set when the object is loaded, see klp_init_object_loaded().
*/
func->old_sympos = old_func->old_sympos;
func->nop = true;
if (!obj) {
obj = klp_alloc_object_dynamic(old_obj->name, patch); if (!obj) return -ENOMEM;
}
klp_for_each_func(old_obj, old_func) {
func = klp_find_func(obj, old_func); if (func) continue;
func = klp_alloc_func_nop(old_func, obj); if (!func) return -ENOMEM;
}
return 0;
}
/* * Add 'nop' functions which simply return to the caller to run the * original function. * * They are added only when the atomic replace mode is used and only for * functions which are currently livepatched but are no longer included * in the new livepatch.
*/ staticint klp_add_nops(struct klp_patch *patch)
{ struct klp_patch *old_patch; struct klp_object *old_obj;
klp_for_each_patch(old_patch) {
klp_for_each_object(old_patch, old_obj) { int err;
err = klp_add_object_nops(patch, old_obj); if (err) return err;
}
}
/*
 * This function implements the free operations that can be called safely
 * under klp_mutex.
 *
 * The operation must be completed by calling klp_free_patch_finish()
 * outside klp_mutex.
 */
static void klp_free_patch_start(struct klp_patch *patch)
{
	/* Unlink the patch from the global klp_patches list, if queued. */
	if (!list_empty(&patch->list))
		list_del(&patch->list);

	klp_free_objects(patch);
}
/*
 * This function implements the free part that must be called outside
 * klp_mutex.
 *
 * It must be called after klp_free_patch_start(). And it has to be
 * the last function accessing the livepatch structures when the patch
 * gets disabled.
 */
static void klp_free_patch_finish(struct klp_patch *patch)
{
	/*
	 * Avoid deadlock with enabled_store() sysfs callback by
	 * calling this outside klp_mutex. It is safe because
	 * this is called when the patch gets disabled and it
	 * cannot get enabled again.
	 */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	/* Put the module after the last access to struct klp_patch. */
	if (!patch->forced)
		module_put(patch->mod);
}
/* * The livepatch might be freed from sysfs interface created by the patch. * This work allows to wait until the interface is destroyed in a separate * context.
*/ staticvoid klp_free_patch_work_fn(struct work_struct *work)
{ struct klp_patch *patch =
container_of(work, struct klp_patch, free_work);
/* * NOPs get the address later. The patched module must be loaded, * see klp_init_object_loaded().
*/ if (!func->new_func && !func->nop) return -EINVAL;
if (strlen(func->old_name) >= KSYM_NAME_LEN) return -EINVAL;
/* The format for the sysfs directory is <function,sympos> where sympos * is the nth occurrence of this symbol in kallsyms for the patched * object. If the user selects 0 for old_sympos, then 1 will be used * since a unique symbol will be the first occurrence.
*/ return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
func->old_name,
func->old_sympos ? func->old_sympos : 1);
}
staticint klp_write_object_relocs(struct klp_patch *patch, struct klp_object *obj, bool apply)
{ int i, ret; struct klp_modinfo *info = patch->mod->klp_info;
for (i = 1; i < info->hdr.e_shnum; i++) {
Elf_Shdr *sec = info->sechdrs + i;
if (!(sec->sh_flags & SHF_RELA_LIVEPATCH)) continue;
ret = klp_write_section_relocs(patch->mod, info->sechdrs,
info->secstrings,
patch->mod->core_kallsyms.strtab,
info->symndx, i, obj->name, apply); if (ret) return ret;
}
/* parts of the initialization that is done only when the object is loaded */ staticint klp_init_object_loaded(struct klp_patch *patch, struct klp_object *obj)
{ struct klp_func *func; int ret;
if (klp_is_module(obj)) { /* * Only write module-specific relocations here * (.klp.rela.{module}.*). vmlinux-specific relocations were * written earlier during the initialization of the klp module * itself.
*/
ret = klp_apply_object_relocs(patch, obj); if (ret) return ret;
}
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
(unsignedlong *)&func->old_func); if (ret) return ret;
ret = kallsyms_lookup_size_offset((unsignedlong)func->old_func,
&func->old_size, NULL); if (!ret) {
pr_err("kallsyms size lookup failed for '%s'\n",
func->old_name); return -ENOENT;
}
if (func->nop)
func->new_func = func->old_func;
ret = kallsyms_lookup_size_offset((unsignedlong)func->new_func,
&func->new_size, NULL); if (!ret) {
pr_err("kallsyms size lookup failed for '%s' replacement\n",
func->old_name); return -ENOENT;
}
}
klp_for_each_object(patch, obj) if (obj->patched)
klp_pre_unpatch_callback(obj);
/* * Enforce the order of the func->transition writes in * klp_init_transition() and the TIF_PATCH_PENDING writes in * klp_start_transition(). In the rare case where klp_ftrace_handler() * is called shortly after klp_update_patch_state() switches the task, * this ensures the handler sees that func->transition is set.
*/
smp_wmb();
/* * Enforce the order of the func->transition writes in * klp_init_transition() and the ops->func_stack writes in * klp_patch_object(), so that klp_ftrace_handler() will see the * func->transition updates before the handler is registered and the * new funcs become visible to the handler.
*/
smp_wmb();
klp_for_each_object(patch, obj) { if (!klp_is_object_loaded(obj)) continue;
ret = klp_pre_patch_callback(obj); if (ret) {
pr_warn("pre-patch callback failed for object '%s'\n",
klp_is_module(obj) ? obj->name : "vmlinux"); goto err;
}
ret = klp_patch_object(obj); if (ret) {
pr_warn("failed to patch object '%s'\n",
klp_is_module(obj) ? obj->name : "vmlinux"); goto err;
}
}
return 0;
err:
pr_warn("failed to enable patch '%s'\n", patch->mod->name);
klp_cancel_transition(); return ret;
}
/** * klp_enable_patch() - enable the livepatch * @patch: patch to be enabled * * Initializes the data structure associated with the patch, creates the sysfs * interface, performs the needed symbol lookups and code relocations, * registers the patched functions with ftrace. * * This function is supposed to be called from the livepatch module_init() * callback. * * Return: 0 on success, otherwise error
*/ int klp_enable_patch(struct klp_patch *patch)
{ int ret; struct klp_object *obj;
if (!patch || !patch->mod || !patch->objs) return -EINVAL;
klp_for_each_object_static(patch, obj) { if (!obj->funcs) return -EINVAL;
}
if (!is_livepatch_module(patch->mod)) {
pr_err("module %s is not marked as a livepatch module\n",
patch->mod->name); return -EINVAL;
}
if (!klp_initialized()) return -ENODEV;
if (!klp_have_reliable_stack()) {
pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
pr_warn("The livepatch transition may never complete.\n");
}
mutex_lock(&klp_mutex);
if (!klp_is_patch_compatible(patch)) {
pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
patch->mod->name);
mutex_unlock(&klp_mutex); return -EINVAL;
}
if (!try_module_get(patch->mod)) {
mutex_unlock(&klp_mutex); return -ENODEV;
}
klp_init_patch_early(patch);
ret = klp_init_patch(patch); if (ret) goto err;
ret = __klp_enable_patch(patch); if (ret) goto err;
/* * This function unpatches objects from the replaced livepatches. * * We could be pretty aggressive here. It is called in the situation where * these structures are no longer accessed from the ftrace handler. * All functions are redirected by the klp_transition_patch. They * use either a new code or they are in the original code because * of the special nop function patches. * * The only exception is when the transition was forced. In this case, * klp_ftrace_handler() might still see the replaced patch on the stack. * Fortunately, it is carefully designed to work with removed functions * thanks to RCU. We only have to keep the patches on the system. Also * this is handled transparently by patch->module_put.
*/ void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
{ struct klp_patch *old_patch;
klp_for_each_patch(old_patch) { if (old_patch == new_patch) return;
/*
 * This function removes the dynamically allocated 'nop' functions.
 *
 * We could be pretty aggressive. NOPs do not change the existing
 * behavior except for adding unnecessary delay by the ftrace handler.
 *
 * It is safe even when the transition was forced. The ftrace handler
 * will see a valid ops->func_stack entry thanks to RCU.
 *
 * We could even free the NOPs structures. They must be the last entry
 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
 * It does the same as klp_synchronize_transition() to make sure that
 * nobody is inside the ftrace handler once the operation finishes.
 *
 * IMPORTANT: It must be called right after removing the replaced patches!
 */
void klp_discard_nops(struct klp_patch *new_patch)
{
	/* Order matters: unpatch the nop funcs before freeing them. */
	klp_unpatch_objects_dynamic(klp_transition_patch);
	klp_free_objects_dynamic(klp_transition_patch);
}
/* * Remove parts of patches that touch a given kernel module. The list of * patches processed might be limited. When limit is NULL, all patches * will be handled.
*/ staticvoid klp_cleanup_module_patches_limited(struct module *mod, struct klp_patch *limit)
{ struct klp_patch *patch; struct klp_object *obj;
klp_for_each_patch(patch) { if (patch == limit) break;
klp_for_each_object(patch, obj) { if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) continue;
if (patch != klp_transition_patch)
klp_pre_unpatch_callback(obj);
pr_notice("reverting patch '%s' on unloading module '%s'\n",
patch->mod->name, obj->mod->name);
klp_unpatch_object(obj);
mutex_lock(&klp_mutex); /* * Each module has to know that klp_module_coming() * has been called. We never know what module will * get patched by a new patch.
*/
mod->klp_alive = true;
mutex_lock(&klp_mutex); /* * Each module has to know that klp_module_going() * has been called. We never know what module will * get patched by a new patch.
*/
mod->klp_alive = false;