// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 *	(wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach to have single copy of unwinding core and APIs
 *   needing unwinding, implement the logic in iterator regarding:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing (wchan)
 *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
 *
 * vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and __get_wchan()
 *
 *  rajeshwarr: 2008
 *  -Initial implementation
 */
/*
 * NOTE(review): the lines below are the TAIL of seed_unwind_frame_info()
 * followed by the body of arc_unwind_core(). The openings of both
 * functions (signatures, the #ifdef CONFIG_ARC_DW2_UNWIND guard, local
 * declarations, and the first two frame-seeding branches for the
 * pt_regs/current-task cases) are missing from this file -- presumably
 * lost in a bad merge/paste. Restore them from the original
 * arch/arc/kernel/stacktrace.c before building; only comments were
 * added here, the surviving code is untouched.
 */
/* Seed the unwind frame from live register values (fp/sp/blink/ret
 * captured by the missing inline-asm block above). */
frame_info->regs.r27 = fp;
frame_info->regs.r28 = sp;
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
} else { /* * Asynchronous unwinding of a likely sleeping task * - first ensure it is actually sleeping * - if so, it will be in __switch_to, kernel mode SP of task * is safe-kept and BLINK at a well known location in there
*/
/* In the prologue of __switch_to, first FP is saved on stack * and then SP is copied to FP. Dwarf assumes cfa as FP based * but we didn't save FP. The value retrieved above is FP's * state in previous frame. * As a work around for this, we unwind from __switch_to start * and adjust SP accordingly. The other limitation is that * __switch_to macro is dwarf rules are not generated for inline * assembly code
*/
frame_info->regs.r27 = 0;
/* presumably 60 = size of __switch_to's stack frame -- TODO confirm */
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
/*
 * NOTE(review): arc_unwind_core() starts somewhere above this line;
 * its signature and the declarations of ret/cnt/address/frame_info
 * are among the missing code.
 */
if (seed_unwind_frame_info(tsk, regs, &frame_info)) return 0;
/* Walk frames until PC leaves kernel text, the consumer stops us,
 * dwarf unwinding fails, or the sanity cap of 128 frames is hit. */
while (1) {
address = UNW_PC(&frame_info);
if (!address || !__kernel_text_address(address)) break;
if (consumer_fn(address, arg) == -1) break;
ret = arc_unwind(&frame_info); if (ret) break;
/* next frame's "return address" is this frame's BLINK */
frame_info.regs.r63 = frame_info.regs.r31;
if (cnt++ > 128) {
printk("unwinder looping too long, aborting !\n"); return 0;
}
}
return address; /* return the last address it saw */ #else /* On ARC, only Dward based unwinder works. fp based backtracing is * not possible (-fno-omit-frame-pointer) because of the way function * prologue is setup (callee regs saved and then fp set and not other * way around
*/
pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); return 0;
#endif
}
/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop, which by default
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 *
 * NOTE(review): the original text of this function was truncated after
 * the loglvl assignment (fused keywords, no body, no closing brace);
 * restored here to match the upstream implementation.
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	/* %pS prints the symbol + offset for a kernel text address */
	printk("%s  %pS\n", loglvl, (void *)address);
	return 0;
}
/* Call-back which plugs into unwinding core to capture the * traces needed by kernel on /proc/<pid>/stack
*/ staticint __collect_all(unsignedint address, void *arg)
{ struct stack_trace *trace = arg;
if (trace->skip > 0)
trace->skip--; else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries) return -1;
if (trace->skip > 0)
trace->skip--; else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries) return -1;
return 0;
}
#endif
/* Call-back for __get_wchan(): keep unwinding (return 0) while the
 * address is still inside scheduler code; stop the iterator (return -1)
 * at the first non-scheduler frame, which arc_unwind_core() then
 * reports as the wait channel.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}
/*-------------------------------------------------------------------------
 * APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */
/* Another API expected by scheduler, shows up in "ps" as Wait Channel
 * Of course just returning schedule( ) would be pointless so unwind until
 * the function is not in scheduler code
 */
unsigned int __get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
#ifdef CONFIG_STACKTRACE
/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
void save_stack_trace(struct stack_trace *trace)
{ /* Pass NULL for task so it unwinds the current call frame */
arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace); #endif