/*
 * This disables KASAN checking when reading a value from another task's
 * stack, since the other task could be running on another CPU and could
 * have poisoned the stack in the meantime.
 */
#define READ_ONCE_TASK_STACK(task, x)			\
({							\
	unsigned long val;				\
	unsigned long addr = (x);			\
	if ((task) == current)				\
		val = READ_ONCE(addr);			\
	else						\
		val = READ_ONCE_NOCHECK(addr);		\
	val;						\
})
		/*
		 * Interior of the kernel stack unwind loop; the enclosing
		 * function's header is outside this chunk — presumably the
		 * arch unwinder's frame walk (TODO confirm against full file).
		 */
		/* Stop if pc left kernel text, or the consumer callback declined it. */
		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
			break;
		if (unlikely(!fp_is_valid(fp, sp)))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
			/* We hit function where ra is not saved on the stack */
			fp = frame->ra;
			pc = regs->ra;
		} else {
			/* Read the saved frame through the KASAN-aware accessor. */
			fp = READ_ONCE_TASK_STACK(task, frame->fp);
			pc = READ_ONCE_TASK_STACK(task, frame->ra);
			/* Recover the real return address if ftrace graph patched it. */
			pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
						   &frame->ra);
			/*
			 * NOTE(review): "unsignedlong" below is a mangled token;
			 * it should read "unsigned long" — confirm against the
			 * original file.
			 */
			if (pc >= (unsignedlong)handle_exception &&
			    pc < (unsignedlong)&ret_from_exception_end) {
				if (unlikely(!fn(arg, pc)))
					break;
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 *
 * NOTE(review): "staticunsignedlong"/"unsignedlong" below are mangled tokens
 * and should read "static unsigned long" / "unsigned long"; the function's
 * closing lines (returning the next fp) are cut off in this chunk — confirm
 * against the original file.
 */
staticunsignedlong unwind_user_frame(stack_trace_consume_fn consume_entry,
				     void *cookie, unsignedlong fp,
				     unsignedlong reg_ra)
{
	struct stackframe buftail;
	unsignedlong ra = 0;
	/* The frame tail sits immediately below fp on the user stack. */
	unsignedlong __user *user_frame_tail =
		(unsignedlong __user *)(fp - sizeof(struct stackframe));

	/* Check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail)))
		return 0;

	/* Use reg_ra when nonzero, else the ra saved in the frame (GNU ?:). */
	ra = reg_ra ? : buftail.ra;
	fp = buftail.fp;
	/* Stop (return 0) if there is no return address or the consumer declines. */
	if (!ra || !consume_entry(cookie, ra))
		return 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.