/*
 * Kernel unwind state
 *
 * @common:	Common unwind state.
 * @task:	The task being unwound.
 * @graph_idx:	Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:	When KRETPROBES is selected, holds the kretprobe instance
 *		associated with the most recently encountered replacement lr
 *		value.
 * @source:	NOTE(review): undocumented in the original header — presumably
 *		records how the current PC was obtained (frame record, caller,
 *		task, regs); confirm against enum kunwind_source.
 * @flags:	NOTE(review): undocumented — per-step flags, reset each unwind
 *		step (see kunwind_next); confirm semantics.
 * @regs:	NOTE(review): undocumented — presumably the pt_regs of an
 *		exception boundary when unwinding from regs; confirm.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};
/* * Start an unwind from a pt_regs. * * The unwind will begin at the PC within the regs. * * The regs must be on a stack currently owned by the calling task.
*/ static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state, struct pt_regs *regs)
{
kunwind_init(state, current);
/* * Start an unwind from a caller. * * The unwind will begin at the caller of whichever function this is inlined * into. * * The function which invokes this must be noinline.
*/ static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
kunwind_init(state, current);
/* * Start an unwind from a blocked task. * * The unwind will begin at the blocked tasks saved PC (i.e. the caller of * cpu_switch_to()). * * The caller should ensure the task is blocked in cpu_switch_to() for the * duration of the unwind, or the unwind will be bogus. It is never valid to * call this for the current task.
*/ static __always_inline void
kunwind_init_from_task(struct kunwind_state *state, struct task_struct *task)
{
kunwind_init(state, task);
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 *
 * Returns 0 on success, or a negative errno (-EINVAL for an unknown source,
 * or whatever the frame-record step reports) on failure.
 *
 * NOTE(review): the extracted source was truncated after the switch; the
 * error propagation, PAC stripping and return-address recovery below are
 * reconstructed from the upstream arm64 unwinder — verify against the
 * original file.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	/* Flags describe a single step; reset them for this one. */
	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
/*
 * Consume one unwind entry for a *reliable* stacktrace, giving up as soon as
 * reliability can no longer be guaranteed.
 *
 * NOTE(review): the extracted source garbled the signature (missing spaces)
 * and truncated the function after the boundary check; the trailing
 * delegation below is reconstructed from the upstream arm64 unwinder —
 * verify against the original file.
 */
static __always_inline bool
arch_reliable_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	/*
	 * At an exception boundary we can reliably consume the saved PC. We do
	 * not know whether the LR was live when the exception was taken, and
	 * so we cannot perform the next unwind step reliably.
	 *
	 * All that matters is whether the *entire* unwind is reliable, so give
	 * up as soon as we hit an exception boundary.
	 */
	if (state->source == KUNWIND_SOURCE_REGS_PC)
		return false;

	return arch_kunwind_consume_entry(state, cookie);
}
/*
 * The struct defined for a userspace stack frame in AARCH64 mode.
 *
 * Packed because it mirrors the exact in-memory layout of the frame record
 * written by userspace (fp, lr pair).
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;	/* fixed: was garbled as "unsignedlong" */
} __attribute__((packed));
/* * Get the return address for a single stackframe and return a pointer to the * next frame tail.
*/ staticstruct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
stack_trace_consume_fn consume_entry)
{ struct frame_tail buftail; unsignedlong err; unsignedlong lr;
/* Also check accessibility of one struct frame_tail beyond */ if (!access_ok(tail, sizeof(buftail))) return NULL;
/* * Frame pointers should strictly progress back up the stack * (towards higher addresses).
*/ if (tail >= buftail.fp) return NULL;
return buftail.fp;
}
#ifdef CONFIG_COMPAT
/*
 * Userspace stack frame layout for AArch32 (compat) tasks.
 *
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t fp;	/* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));
/*
 * NOTE(review): the following text is German website boilerplate that was
 * accidentally captured during extraction; it is not part of this source
 * file and should be removed. Translated: "The information on this website
 * was carefully compiled to the best of our knowledge. However, neither the
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement are
 * still experimental."
 */