/* * The ftrace_test_recursion_trylock() will disable preemption, * which is required for the variant of synchronize_rcu() that is * used to allow patching functions where RCU is not watching. * See klp_synchronize_transition() for more details.
*/
bit = ftrace_test_recursion_trylock(ip, parent_ip); if (WARN_ON_ONCE(bit < 0)) return;
/* * func should never be NULL because preemption should be disabled here * and unregister_ftrace_function() does the equivalent of a * synchronize_rcu() before the func_stack removal.
*/ if (WARN_ON_ONCE(!func)) goto unlock;
/* * In the enable path, enforce the order of the ops->func_stack and * func->transition reads. The corresponding write barrier is in * __klp_enable_patch(). * * (Note that this barrier technically isn't needed in the disable * path. In the rare case where klp_update_patch_state() runs before * this handler, its TIF_PATCH_PENDING read and this func->transition * read need to be ordered. But klp_update_patch_state() already * enforces that.)
*/
smp_rmb();
if (unlikely(func->transition)) {
/* * Enforce the order of the func->transition and * current->patch_state reads. Otherwise we could read an * out-of-date task state and pick the wrong function. The * corresponding write barrier is in klp_init_transition().
*/
smp_rmb();
patch_state = current->patch_state;
WARN_ON_ONCE(patch_state == KLP_TRANSITION_IDLE);
if (patch_state == KLP_TRANSITION_UNPATCHED) { /* * Use the previously patched version of the function. * If no previous patches exist, continue with the * original function.
*/
func = list_entry_rcu(func->stack_node.next, struct klp_func, stack_node);
if (&func->stack_node == &ops->func_stack) goto unlock;
}
}
/* * NOPs are used to replace existing patches with original code. * Do nothing! Setting pc would cause an infinite loop.
*/ if (func->nop) goto unlock;
staticint klp_patch_func(struct klp_func *func)
{ struct klp_ops *ops; int ret;
if (WARN_ON(!func->old_func)) return -EINVAL;
if (WARN_ON(func->patched)) return -EINVAL;
ops = klp_find_ops(func->old_func); if (!ops) { unsignedlong ftrace_loc;
ftrace_loc = ftrace_location((unsignedlong)func->old_func); if (!ftrace_loc) {
pr_err("failed to find location for function '%s'\n",
func->old_name); return -EINVAL;
}
ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) return -ENOMEM;
ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0); if (ret) {
pr_err("failed to set ftrace filter for function '%s' (%d)\n",
func->old_name, ret); goto err;
}
ret = register_ftrace_function(&ops->fops); if (ret) {
pr_err("failed to register ftrace handler for function '%s' (%d)\n",
func->old_name, ret);
ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0); goto err;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.