if (validate) {
call[0] = to_auipc_t0(offset); /* * Read the text we want to modify; * return must be -EFAULT on read error
*/ if (copy_from_kernel_nofault(replaced, (void *)source, 2 * MCOUNT_INSN_SIZE)) return -EFAULT;
if (replaced[0] != call[0]) {
pr_err("%p: expected (%08x) but got (%08x)\n",
(void *)source, call[0], replaced[0]); return -EINVAL;
}
}
/* Replace the jalr at once. Return -EPERM on write error. */ if (patch_insn_write((void *)(source + MCOUNT_AUIPC_SIZE), call + 1, MCOUNT_JALR_SIZE)) return -EPERM;
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsignedlong addr)
{
u32 nop4 = RISCV_INSN_NOP4; int ret;
ret = ftrace_rec_set_nop_ops(rec); if (ret) return ret;
if (patch_insn_write((void *)rec->ip, &nop4, MCOUNT_NOP4_SIZE)) return -EPERM;
return 0;
}
/* * This is called early on, and isn't wrapped by * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold * text_mutex, which triggers a lockdep failure. SMP isn't running so we could * just directly poke the text, but it's simpler to just take the lock * ourselves.
*/ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{ unsignedlong pc = rec->ip - MCOUNT_AUIPC_SIZE; unsignedint nops[2], offset; int ret;
guard(mutex)(&text_mutex);
ret = ftrace_rec_set_nop_ops(rec); if (ret) return ret;
ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
return ret;
}
/* Destination the common trampoline jumps to; starts as the no-op stub. */
ftrace_func_t ftrace_call_dest = ftrace_stub;

/*
 * Point the shared ftrace trampoline at @func.
 *
 * The WRITE_ONCE / smp_wmb / IPI sequence below is order-sensitive: do not
 * reorder these statements.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	WRITE_ONCE(ftrace_call_dest, func);
	/*
	 * The data fence ensures that the update to ftrace_call_dest happens
	 * before the write to function_trace_op later in the generic ftrace.
	 * If the sequence is not enforced, then an old ftrace_call_dest may
	 * race loading a new function_trace_op set in ftrace_modify_all_code
	 */
	smp_wmb();
	/*
	 * Updating ftrace does not take the stop_machine path, so irqs should
	 * not be disabled.
	 */
	WARN_ON(irqs_disabled());
	/* Make every other CPU observe the new destination before we return. */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 *
 * Hook the return address stored at @parent so the traced function returns
 * through return_to_handler, letting the graph tracer record the exit.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	/*
	 * Fixed mojibake: "¤t" is the HTML entity rendering of
	 * "&curr" — the original expression is &current->tracing_graph_pause.
	 * Skip hooking while graph tracing is paused for this task.
	 */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.