// SPDX-License-Identifier: GPL-2.0 /* * Code for replacing ftrace calls with jumps. * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. * * Added function graph tracer code, taken from x86 that was written * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. *
*/
/* * We generally only have a single long_branch tramp and at most 2 or 3 plt * tramps generated. But, we don't use the plt tramps currently. We also allot * 2 tramps after .text and .init.text. So, we only end up with around 3 usable * tramps in total. Set aside 8 just to be sure.
 */
#define	NUM_FTRACE_TRAMPS	8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
/* * Note: * We are paranoid about modifying text, as if a bug was to happen, it * could cause us to read or write to someplace that could cause harm. * Carefully read and modify the code with probe_kernel_*(), and make * sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */ if (copy_inst_from_kernel_nofault(&replaced, (void *)ip)) return -EFAULT;
/* Make sure it is what we expect it to be */ if (!ppc_inst_equal(replaced, old)) {
pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old)); return -EINVAL;
}
/* replace the text with the new text */ return patch_instruction((u32 *)ip, new);
}
/* * Helper functions that are the same for both PPC64 and PPC32.
*/ staticint test_24bit_addr(unsignedlong ip, unsignedlong addr)
{
addr = ppc_function_entry((void *)addr);
if (!mod) {
mod = ftrace_lookup_module(rec); if (!mod) return -EINVAL;
}
/* read where this goes */ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n"); return -EFAULT;
}
/* Make sure that this is still a 24bit jump */ if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op)); return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n"); return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr); /* This should match what was called */ if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry); return -EINVAL;
}
if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) { if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4); return -EFAULT;
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
pr_err("Unexpected instruction %08lx around bl _mcount\n",
ppc_inst_as_ulong(op)); return -EINVAL;
}
} elseif (IS_ENABLED(CONFIG_PPC64)) { /* * Check what is in the next instruction. We can see ld r2,40(r1), but * on first pass after boot we will see mflr r0.
*/ if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
pr_err("Fetching op failed.\n"); return -EFAULT;
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
ppc_inst_as_ulong(op)); return -EINVAL;
}
}
/* * When using -mprofile-kernel or PPC32 there is no load to jump over. * * Otherwise our original call site looks like: * * bl <tramp> * ld r2,XX(r1) * * Milton Miller pointed out that we can not simply nop the branch. * If a task was preempted when calling a trace function, the nops * will remove the way to restore the TOC in r2 and the r2 TOC will * get corrupted. * * Use a b +8 to jump over the load.
*/ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
pop = ppc_inst(PPC_RAW_NOP()); else
pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */
staticunsignedlong find_ftrace_tramp(unsignedlong ip)
{ int i;
/* * We have the compiler generated long_branch tramps at the end * and we prefer those
*/ for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--) if (!ftrace_tramps[i]) continue; elseif (is_offset_in_branch_range(ftrace_tramps[i] - ip)) return ftrace_tramps[i];
return 0;
}
staticint add_ftrace_tramp(unsignedlong tramp)
{ int i;
for (i = 0; i < NUM_FTRACE_TRAMPS; i++) if (!ftrace_tramps[i]) {
ftrace_tramps[i] = tramp; return 0;
}
return -1;
}
/* * If this is a compiler generated long_branch trampoline (essentially, a * trampoline that has a branch to _mcount()), we re-write the branch to * instead go to ftrace_[regs_]caller() and note down the location of this * trampoline.
*/ staticint setup_mcount_compiler_tramp(unsignedlong tramp)
{ int i;
ppc_inst_t op; unsignedlong ptr;
/* Is this a known long jump tramp? */ for (i = 0; i < NUM_FTRACE_TRAMPS; i++) if (ftrace_tramps[i] == tramp) return 0;
/* New trampoline -- read where this goes */ if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n"); return -1;
}
/* Is this a 24 bit branch? */ if (!is_b_op(op)) {
pr_debug("Trampoline is not a long branch tramp.\n"); return -1;
}
/* lets find where the pointer goes */
ptr = find_bl_target(tramp, op);
if (ptr != ppc_global_function_entry((void *)_mcount)) {
pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr); return -1;
}
/* Let's re-write the tramp to go to ftrace_[regs_]caller */ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
ptr = ppc_global_function_entry((void *)ftrace_regs_caller); else
ptr = ppc_global_function_entry((void *)ftrace_caller);
if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n"); return -1;
}
if (add_ftrace_tramp(tramp)) {
pr_debug("No tramp locations left\n"); return -1;
}
/* Read where this goes */ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n"); return -EFAULT;
}
/* Make sure that this is still a 24bit jump */ if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op)); return -EINVAL;
}
/* Let's find where the pointer goes */
tramp = find_bl_target(ip, op);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (setup_mcount_compiler_tramp(tramp)) { /* Are other trampolines reachable? */ if (!find_ftrace_tramp(ip)) {
pr_err("No ftrace trampolines reachable from %ps\n",
(void *)ip); return -EINVAL;
}
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsignedlong addr)
{ unsignedlong ip = rec->ip;
ppc_inst_t old, new;
/* * If the calling address is more that 24 bits away, * then we had to use a trampoline to make the call. * Otherwise just update the call site.
*/ if (test_24bit_addr(ip, addr)) { /* within range */
old = ftrace_call_replace(ip, addr, 1); new = ppc_inst(PPC_RAW_NOP()); return ftrace_modify_code(ip, old, new);
} elseif (core_kernel_text(ip)) { return __ftrace_make_nop_kernel(rec, addr);
} elseif (!IS_ENABLED(CONFIG_MODULES)) { return -EINVAL;
}
return __ftrace_make_nop(mod, rec, addr);
}
#ifdef CONFIG_MODULES
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
	else
		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}
/* read where this goes */ if (copy_inst_from_kernel_nofault(op, ip)) return -EFAULT;
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
copy_inst_from_kernel_nofault(op + 1, ip + 4)) return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1])); return -EINVAL;
}
/* If we never set up ftrace trampoline(s), then bail */ if (!mod->arch.tramp ||
(IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
pr_err("No ftrace trampoline\n"); return -EINVAL;
}
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n"); return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr); /* This should match what was called */ if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry); return -EINVAL;
}
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n"); return -EINVAL;
}
/* Make sure we're being asked to patch branch to a known ftrace addr */
entry = ppc_global_function_entry((void *)ftrace_caller);
ptr = ppc_global_function_entry((void *)addr);
if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
entry = ppc_global_function_entry((void *)ftrace_regs_caller);
if (ptr != entry) {
pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr); return -EINVAL;
}
/* Make sure we have a nop */ if (copy_inst_from_kernel_nofault(&op, ip)) {
pr_err("Unable to read ftrace location %p\n", ip); return -EFAULT;
}
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Unexpected call sequence at %p: %08lx\n",
ip, ppc_inst_as_ulong(op)); return -EINVAL;
}
tramp = find_ftrace_tramp((unsignedlong)ip); if (!tramp) {
pr_err("No ftrace trampolines reachable from %ps\n", ip); return -EINVAL;
}
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("Error patching branch to ftrace tramp!\n"); return -EINVAL;
}
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsignedlong addr)
{ unsignedlong ip = rec->ip;
ppc_inst_t old, new;
/* * If the calling address is more that 24 bits away, * then we had to use a trampoline to make the call. * Otherwise just update the call site.
*/ if (test_24bit_addr(ip, addr)) { /* within range */
old = ppc_inst(PPC_RAW_NOP()); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new);
} elseif (core_kernel_text(ip)) { return __ftrace_make_call_kernel(rec, addr);
} elseif (!IS_ENABLED(CONFIG_MODULES)) { /* We should not get here without modules */ return -EINVAL;
}
/* If we never set up ftrace trampolines, then bail */ if (!mod->arch.tramp || !mod->arch.tramp_regs) {
pr_err("No ftrace trampoline\n"); return -EINVAL;
}
/* read where this goes */ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n"); return -EFAULT;
}
/* Make sure that this is still a 24bit jump */ if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op)); return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
entry = ppc_global_function_entry((void *)old_addr);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (tramp != entry) { /* old_addr is not within range, so we must have used a trampoline */ if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n"); return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
/* This should match what was called */ if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry); return -EINVAL;
}
}
/* The new target may be within range */ if (test_24bit_addr(ip, addr)) { /* within range */ if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n"); return -EINVAL;
}
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n"); return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr); /* This should match what was called */ if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry); return -EINVAL;
}
if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n"); return -EINVAL;
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsignedlong old_addr, unsignedlong addr)
{ unsignedlong ip = rec->ip;
ppc_inst_t old, new;
/* * If the calling address is more that 24 bits away, * then we had to use a trampoline to make the call. * Otherwise just update the call site.
*/ if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) { /* within range */
old = ftrace_call_replace(ip, old_addr, 1); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new);
} elseif (core_kernel_text(ip)) { /* * We always patch out of range locations to go to the regs * variant, so there is nothing to do here
*/ return 0;
} elseif (!IS_ENABLED(CONFIG_MODULES)) { /* We should not get here without modules */ return -EINVAL;
}
/*
 * Repoint the ftrace_call patch site (and, when
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS is enabled, the ftrace_regs_call
 * site) so that traced functions call @func.
 *
 * Returns 0 on success or the error from ftrace_modify_code().
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS)) return 0;
old = ftrace_call_replace(ip, enable ? stub : addr, 0); new = ftrace_call_replace(ip, enable ? addr : stub, 0);
return ftrace_modify_code(ip, old, new);
}
/* Enable the function-graph entry hook by patching the graph caller in. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
/* Disable the function-graph entry hook by patching the graph caller out. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
/* * Hook the return address and push it in the stack of return addrs * in current thread info. Return the address we want to divert to.
*/ staticunsignedlong
__prepare_ftrace_return(unsignedlong parent, unsignedlong ip, unsignedlong sp, struct ftrace_regs *fregs)
{ unsignedlong return_hooker;
if (unlikely(ftrace_graph_is_dead())) goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
/*
 * NOTE(review): the following is leftover boilerplate from the web page
 * this file was extracted from — it is not part of the kernel source.
 * English translation of the original German text: "The information on
 * this web page has been compiled carefully and to the best of our
 * knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed. Note: the coloured
 * syntax highlighting and the measurement are still experimental."
 */