/* * If we crash with IP==0, the last successfully executed instruction * was probably an indirect function call with a NULL function pointer, * and we don't have unwind information for NULL. * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function * pointer into its parent and then continue normally from there.
*/ staticstruct orc_entry orc_null_entry = {
.sp_reg = ORC_REG_SP,
.sp_offset = sizeof(long),
.fp_reg = ORC_REG_UNDEFINED,
.type = ORC_TYPE_CALL
};
/*
 * Do a binary range search to find the rightmost duplicate of a given
 * starting address. Some entries are section terminators which are
 * "weak" entries for ensuring there are no gaps. They should be
 * ignored when they conflict with a real entry.
 */
while (first <= last) {
	/* Overflow-safe midpoint (avoids first + last wrapping). */
	mid = first + ((last - first) / 2);

	if (orc_ip(mid) <= ip) {
		/* Candidate; keep scanning right for the last duplicate <= ip. */
		found = mid;
		first = mid + 1;
	} else
		last = mid - 1;
}
/* * Ftrace dynamic trampolines do not have orc entries of their own. * But they are copies of the ftrace entries that are static and * defined in ftrace_*.S, which do have orc entries. * * If the unwinder comes across a ftrace trampoline, then find the * ftrace function that was used to create it, and use that ftrace * function's orc entry, as the placement of the return code in * the stack will be identical.
*/ staticstruct orc_entry *orc_ftrace_find(unsignedlong ip)
{ struct ftrace_ops *ops; unsignedlong tramp_addr, offset;
ops = ftrace_ops_trampoline(ip); if (!ops) return NULL;
/* Set tramp_addr to the start of the code copied by the trampoline */ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
tramp_addr = (unsignedlong)ftrace_regs_caller; else
tramp_addr = (unsignedlong)ftrace_caller;
/* Now place tramp_addr to the location within the trampoline ip is at */
offset = ip - ops->trampoline;
tramp_addr += offset;
/* Primary key: compare the decoded instruction addresses. */
if (a_val > b_val)
	return 1;
if (a_val < b_val)
	return -1;

/*
 * The "weak" section terminator entries need to always be first
 * to ensure the lookup code skips them in favor of real entries.
 * These terminator entries exist to handle any gaps created by
 * whitelisted .o files which didn't get objtool generation.
 */
orc_a = cur_orc_table + (a - cur_orc_ip_table);
/*
 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
 * associate an .orc_unwind_ip table entry with its corresponding
 * .orc_unwind entry so they can both be swapped.
 */
mutex_lock(&sort_mutex);
cur_orc_ip_table = orc_ip;
cur_orc_table = orc;
/* Sort the ip table; the swap callback keeps the orc table in lockstep. */
sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
mutex_unlock(&sort_mutex);
/*
 * Note, the orc_unwind and orc_unwind_ip tables were already
 * sorted at build time via the 'sorttable' tool.
 * It's ready for binary search straight away, no need to sort it.
 */

/* Initialize the fast lookup table: */
lookup_num_blocks = orc_lookup_end - orc_lookup;
for (i = 0; i < lookup_num_blocks-1; i++) {
	/* Find the ORC entry covering the start address of block i. */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
			 num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
	if (!orc) {
		/* A missing entry means the build-time tables are broken. */
		orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
/* Don't let modules unload while we're reading their ORC data. */
guard(rcu)();

/* Reached a known entry function: the unwind terminates cleanly. */
if (is_entry_func(state->pc))
	goto end;

orc = orc_find(state->pc);
if (!orc) {
	/*
	 * As a fallback, try to assume this code uses a frame pointer.
	 * This is useful for generated code, like BPF, which ORC
	 * doesn't know about. This is just a guess, so the rest of
	 * the unwind is no longer considered reliable.
	 */
	orc = &orc_fp_entry;
	state->error = true;
} else {
	if (orc->type == ORC_TYPE_UNDEFINED)
		goto err;

	if (orc->type == ORC_TYPE_END_OF_STACK)
		goto end;
}

/* Compute the caller's stack pointer from the recorded base register. */
switch (orc->sp_reg) {
case ORC_REG_SP:
	if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
		/* At the top of an IRQ stack: switch to decoding saved regs. */
		orc->type = ORC_TYPE_REGS;
	else
		state->sp = state->sp + orc->sp_offset;
	break;
case ORC_REG_FP:
	state->sp = state->fp;
	break;
default:
	orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc);
	goto err;
}

/* Recover the caller's frame pointer, if one was saved. */
switch (orc->fp_reg) {
case ORC_REG_PREV_SP:
	p = (unsigned long *)(state->sp + orc->fp_offset);
	if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
		goto err;

	state->fp = *p;
	break;
case ORC_REG_UNDEFINED:
	/* Nothing. */
	break;
default:
	orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc);
	goto err;
}

/* Finally, derive the caller's PC (return address). */
switch (orc->type) {
case ORC_TYPE_CALL:
	if (orc->ra_reg == ORC_REG_PREV_SP) {
		/* Return address was spilled to the stack. */
		p = (unsigned long *)(state->sp + orc->ra_offset);
		if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
			goto err;

		pc = unwind_graph_addr(state, *p, state->sp);
		/* Point at the call site itself, not the return address. */
		pc -= LOONGARCH_INSN_SIZE;
	} else if (orc->ra_reg == ORC_REG_UNDEFINED) {
		/* Leaf frame: return address still lives in $ra. */
		if (!state->ra || state->ra == state->pc)
			goto err;

		pc = unwind_graph_addr(state, state->ra, state->sp);
		pc -= LOONGARCH_INSN_SIZE;
		/* $ra is consumed; it must not be reused for the next frame. */
		state->ra = 0;
	} else {
		orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc);
		goto err;
	}
	break;
case ORC_TYPE_REGS:
	/* A full pt_regs frame was pushed (exception/interrupt entry). */
	if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
		regs = (struct pt_regs *)info->next_sp;
	else
		regs = (struct pt_regs *)state->sp;

	if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs)))
		goto err;
/*
 * NOTE(review): the following text is extraneous (German) website-disclaimer
 * boilerplate that does not belong in this source file and will not compile;
 * it should be removed. English translation, kept for review:
 *
 * The information on this website has been carefully compiled to the best of
 * our knowledge. However, neither the completeness, nor the correctness, nor
 * the quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental.
 */