// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */
/*
 * Arch override: MIPS can patch call sites without the stop_machine()
 * serialization the generic path would otherwise impose, so delegate
 * straight to the generic modifier.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsignedlong addr)
{ unsignedintnew; unsignedlong ip = rec->ip;
/* * If ip is in kernel space, no long call, otherwise, long call is * needed.
*/ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; #ifdef CONFIG_64BIT return ftrace_modify_code(ip, new); #else /* * On 32 bit MIPS platforms, gcc adds a stack adjust * instruction in the delay slot after the branch to * mcount and expects mcount to restore the sp on return. * This is based on a legacy API and does nothing but * waste instructions so it's being removed at runtime.
*/ return ftrace_modify_code_2(ip, new, INSN_NOP); #endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsignedlong addr)
{ unsignedintnew; unsignedlong ip = rec->ip;
new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
/* * For module, move the ip from the return address after the * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for * kernel, move after the instruction "move ra, at"(offset is 16)
*/
ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
/* * search the text until finding the non-store instruction or "s{d,w} * ra, offset(sp)" instruction
*/ do { /* get the code at "ip": code = *(unsigned int *)ip; */
safe_load_code(code, ip, faulted);
if (unlikely(faulted)) return 0; /* * If we hit the non-store instruction before finding where the * ra is stored, then this is a leaf function and it does not * store the ra on the stack
*/ if ((code & S_R_SP) != S_R_SP) return parent_ra_addr;
/* Move to the next instruction */
ip -= 4;
} while ((code & S_RA_SP) != S_RA_SP);
sp = fp + (code & OFFSET_MASK);
/* tmp = *(unsigned long *)sp; */
safe_load_stack(tmp, sp, faulted); if (unlikely(faulted)) return 0;
if (tmp == old_parent_ra) return sp; return 0;
}
#endif/* !KBUILD_MCOUNT_RA_ADDRESS */
/*
 * NOTE(review): this function is TRUNCATED in this extraction — the code
 * after the final "Get the recorded ip" comment is missing, including the
 * "out:" label that the three goto statements below target, so the text
 * as-is cannot compile. Whitespace was also stripped ("unsignedlong" for
 * "unsigned long") and "&current" was corrupted into "¤t" by an HTML
 * entity. Restore from the original arch/mips/kernel/ftrace.c; comments
 * only are added here, code left byte-identical.
 */
/* * Hook the return address and push it in the stack of return addrs * in current thread info.
*/ void prepare_ftrace_return(unsignedlong *parent_ra_addr, unsignedlong self_ra, unsignedlong fp)
{ unsignedlong old_parent_ra; unsignedlong return_hooker = (unsignedlong)
&return_to_handler; int faulted, insns;
/* Bail out if the graph tracer has been torn down. */
if (unlikely(ftrace_graph_is_dead())) return;
/* Bail out while graph tracing is paused for this task. */
if (unlikely(atomic_read(¤t->tracing_graph_pause))) return;
/* * "parent_ra_addr" is the stack address where the return address of * the caller of _mcount is saved. * * If gcc < 4.5, a leaf function does not save the return address * in the stack address, so we "emulate" one in _mcount's stack space, * and hijack it directly. * For a non-leaf function, it does save the return address to its own * stack space, so we can not hijack it directly, but need to find the * real stack address, which is done by ftrace_get_parent_addr(). * * If gcc >= 4.5, with the new -mmcount-ra-address option, for a * non-leaf function, the location of the return address will be saved * to $12 for us. * For a leaf function, it just puts a zero into $12, so we handle * it in ftrace_graph_caller() of mcount.S.
*/
/* old_parent_ra = *parent_ra_addr; */
safe_load_stack(old_parent_ra, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS
parent_ra_addr = (unsignedlong *)ftrace_get_parent_ra_addr(self_ra,
old_parent_ra, (unsignedlong)parent_ra_addr, fp); /* * If fails when getting the stack address of the non-leaf function's * ra, stop function graph tracer and return
*/ if (parent_ra_addr == NULL) goto out; #endif /* *parent_ra_addr = return_hooker; */
/* Redirect the saved return address to return_to_handler. */
safe_store_stack(return_hooker, parent_ra_addr, faulted); if (unlikely(faulted)) goto out;
/* * Get the recorded ip of the current mcount calling site in the * __mcount_loc section, which will be used to filter the function * entries configured through the tracing/set_graph_function interface.
*/
/* NOTE(review): remainder of the function (and the "out:" error path) is
 * missing from this chunk. */
/*
 * NOTE(review): the following is extraneous website boilerplate that leaked
 * into the extracted source and is NOT part of the original kernel file —
 * it should be deleted. English translation of the German text: "The
 * information on this website was compiled carefully to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed. Remark: the syntax highlighting
 * and the measurement are still experimental."
 */