if (type == STACK_TYPE_ENTRY) {
	/*
	 * On 64-bit, we have a generic entry stack that we
	 * use for all the kernel entry points, including
	 * SYSENTER.
	 */
	return "ENTRY_TRAMPOLINE";
}

/* Exception stacks map contiguously onto the name table. */
if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
	return exception_stack_names[type - STACK_TYPE_EXCEPTION];

/* Unknown stack type: caller must handle a NULL name. */
return NULL;
}
/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:	Offset from the start of the exception stack area
 * @size:	Size of the exception stack
 * @type:	Type to store in the stack_info struct
 */
struct estack_pages {
	u32	offs;
	u16	size;
	u16	type;
};
/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. The guard pages including the not mapped DB2 stack are zeroed
 * out.
 */
static const struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
	EPAGERANGE(DF),
	EPAGERANGE(NMI),
	EPAGERANGE(DB),
	EPAGERANGE(MCE),
	EPAGERANGE(VC),
	EPAGERANGE(VC2),
};
begin = (unsignedlong)__this_cpu_read(cea_exception_stacks); /* * Handle the case where stack trace is collected _before_ * cea_exception_stacks had been initialized.
*/ if (!begin) returnfalse;
end = begin + sizeof(struct cea_exception_stacks); /* Bail if @stack is outside the exception stack area. */ if (stk < begin || stk >= end) returnfalse;
/* Calc page offset from start of exception stacks */
k = (stk - begin) >> PAGE_SHIFT; /* Lookup the page descriptor */
ep = &estack_pages[k]; /* Guard page? */ if (!ep->size) returnfalse;
begin += (unsignedlong)ep->offs;
end = begin + (unsignedlong)ep->size;
regs = (struct pt_regs *)end - 1;
/*
 * @end points directly to the top most stack entry to avoid a -8
 * adjustment in the stack switch hotpath. Adjust it back before
 * calculating @begin.
 */
end++;
begin = end - (IRQ_STACK_SIZE / sizeof(long));

/*
 * Due to the switching logic RSP can never be == @end because the
 * final operation is 'popq %rsp' which means after that RSP points
 * to the original stack and not to @end.
 */
if (stack < begin || stack >= end)
	return false;

/*
 * The next stack pointer is stored at the top of the irq stack
 * before switching to the irq stack. Actual stack entries are all
 * below that.
 */
info->next_sp = (unsigned long *)*(end - 1);
if (!get_stack_info_noinstr(stack, task, info))
	goto unknown;

/*
 * Make sure we don't iterate through any given stack more than once.
 * If it comes up a second time then there's something wrong going on:
 * just break out and report an unknown stack type.
 */
if (visit_mask) {
	if (*visit_mask & (1UL << info->type)) {
		/* Only warn for the current task to avoid noise. */
		if (task == current)
			printk_deferred_once(KERN_WARNING
				"WARNING: stack recursion on stack type %d\n",
				info->type);
		goto unknown;
	}
	*visit_mask |= 1UL << info->type;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.