/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

/*
 * The compiler emitted profiling hook consists of
 *
 *   PUSH    {LR}
 *   BL      __gnu_mcount_nc
 *
 * To turn this combined sequence into a NOP, we need to restore the value of
 * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
 * modified anyway, and reloading LR from memory is highly likely to be less
 * efficient.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * stop_machine() callback: applies the requested ftrace command while all
 * other CPUs are quiesced, so the instruction patching cannot race with
 * concurrent execution of the patched call sites.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsignedlong addr)
{ unsignedlong aaddr = adjust_address(rec, addr); unsignedlong ip = rec->ip; unsignedlong old; unsignedlongnew; int ret;
#ifdef CONFIG_ARM_MODULE_PLTS /* mod is only supplied during module loading */ if (!mod)
mod = rec->arch.mod; else
rec->arch.mod = mod; #endif
old = ftrace_call_replace(ip, aaddr,
!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); #ifdef CONFIG_ARM_MODULE_PLTS if (!old && mod) {
aaddr = get_module_plt(mod, ip, aaddr);
old = ftrace_call_replace(ip, aaddr, true);
} #endif
new = ftrace_nop_replace(rec); /* * Locations in .init.text may call __gnu_mcount_mc via a linker * emitted veneer if they are too far away from its implementation, and * so validation may fail spuriously in such cases. Let's work around * this by omitting those from validation.
*/
ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
err_out: return;
if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) { /* * Usually, the stack frames are contiguous in memory but cases * have been observed where the next stack frame does not live * at 'frame_pointer + 4' as this code used to assume. * * Instead, dereference the field in the stack frame that * stores the SP of the calling frame: to avoid unbounded * recursion, this cannot involve any ftrace instrumented * functions, so use the __get_kernel_nofault() primitive * directly.
*/
__get_kernel_nofault(&frame_pointer,
(unsignedlong *)(frame_pointer - 8), unsignedlong, err_out);
} else { struct stackframe frame = {
.fp = frame_pointer,
.sp = stack_pointer,
.lr = self_addr,
.pc = self_addr,
}; if (unwind_frame(&frame) < 0) return; if (frame.lr != self_addr)
parent = frame.lr_addr;
frame_pointer = frame.sp;
}
old = *parent;
*parent = return_hooker;
if (function_graph_enter(old, self_addr, frame_pointer, NULL))
*parent = old;
}
/*
 * NOTE(review): the following text is extraneous website boilerplate that was
 * appended to this file during extraction; it is not part of the kernel
 * source. Original German, translated:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */