if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
IS_ENABLED(CONFIG_CC_IS_GCC) &&
end > ALIGN(frame, THREAD_SIZE)) { /* * If we are walking past the end of the stack, it may be due * to the fact that we are on an IRQ or overflow stack. In this * case, we can load the address of the other stack from the * frame record.
*/
frame = ((unsignedlong *)frame)[-2] - 4;
end = frame + 4 + sizeof(struct pt_regs);
}
#ifndef CONFIG_KALLSYMS
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from); #elifdefined CONFIG_BACKTRACE_VERBOSE
printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
loglvl, where, (void *)where, from, (void *)from); #else
printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from); #endif
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
dump_mem(loglvl, "Exception stack", frame + 4, end);
}
for (reg = 10, x = 0, p = str; reg >= 0; reg--) { if (instruction & BIT(reg)) {
p += sprintf(p, " r%d:%08x", reg, *stack--); if (++x == 6) {
x = 0;
p = str;
printk("%s%s\n", loglvl, str);
}
}
} if (p != str)
printk("%s%s\n", loglvl, str);
}
#ifndef CONFIG_ARM_UNWIND /* * Stack pointers should always be within the kernels view of * physical memory. If it is not there, then we can't dump * out any information relating to the stack.
*/ staticint verify_stack(unsignedlong sp)
{ if (sp < PAGE_OFFSET ||
(!IS_ENABLED(CONFIG_VMAP_STACK) &&
sp > (unsignedlong)high_memory && high_memory != NULL)) return -EFAULT;
return 0;
} #endif
/* * Dump out the contents of some memory nicely...
*/ void dump_mem(constchar *lvl, constchar *str, unsignedlong bottom, unsignedlong top)
{ unsignedlong first; int i;
printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
for (first = bottom & ~31; first < top; first += 32) { unsignedlong p; char str[sizeof(" 12345678") * 8 + 1];
/* * Note that we now dump the code first, just in case the backtrace * kills us.
*/
for (i = -4; i < 1 + !!thumb; i++) { unsignedint val, bad;
if (thumb) {
u16 tmp;
if (user_mode(regs))
bad = get_user(tmp, &((u16 __user *)addr)[i]); else
bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]);
val = __mem_to_opcode_thumb16(tmp);
} else { if (user_mode(regs))
bad = get_user(val, &((u32 __user *)addr)[i]); else
bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
val = __mem_to_opcode_arm(val);
}
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
width, val); else {
p += sprintf(p, "bad PC value"); break;
}
}
printk("%sCode: %s\n", lvl, str);
}
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); if (ret == NOTIFY_STOP) return 1;
if (in_interrupt())
panic("Fatal exception in interrupt"); if (panic_on_oops)
panic("Fatal exception"); if (signr)
make_task_dead(signr);
}
/* * This function is protected against re-entrancy.
*/ void die(constchar *str, struct pt_regs *regs, int err)
{ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; unsignedlong flags = oops_begin(); int sig = SIGSEGV;
if (!user_mode(regs))
bug_type = report_bug(regs->ARM_pc, regs); if (bug_type != BUG_TRAP_TYPE_NONE)
str = "Oops - BUG";
if (__die(str, err, regs))
sig = 0;
oops_end(flags, regs, sig);
}
void arm_notify_die(constchar *str, struct pt_regs *regs, int signo, int si_code, void __user *addr, unsignedlong err, unsignedlong trap)
{ if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers
 * platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();

	/* nop. FIQ handlers for special arch/arm features can be added here. */

	nmi_exit();

	set_irq_regs(old_regs);
}
/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	/* die() may return if a notifier suppressed it; make sure we don't. */
	local_irq_disable();
	panic("bad mode");
}
staticint bad_syscall(int n, struct pt_regs *regs)
{ if ((current->personality & PER_MASK) != PER_LINUX) {
send_sig(SIGSEGV, current, 1); return regs->ARM_r0;
}
#ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: obsolete system call %08x.\n",
task_pid_nr(current), current->comm, n);
dump_instr(KERN_ERR, regs);
} #endif
arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
(void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4),
n, 0);
return regs->ARM_r0;
}
staticinlineint
__do_cache_op(unsignedlong start, unsignedlong end)
{ unsignedint ua_flags; int ret;
do { unsignedlong chunk = min(PAGE_SIZE, end - start);
if (fatal_signal_pending(current)) return 0;
ua_flags = uaccess_save_and_enable();
ret = flush_icache_user_range(start, start + chunk);
uaccess_restore(ua_flags); if (ret) return ret;
cond_resched();
start += chunk;
} while (start < end);
return 0;
}
staticinlineint
do_cache_op(unsignedlong start, unsignedlong end, int flags)
{ if (end < start || flags) return -EINVAL;
if (!access_ok((void __user *)start, end - start)) return -EFAULT;
return __do_cache_op(start, end);
}
/* * Handle all unrecognised system calls. * 0x9f0000 - 0x9fffff are some more esoteric system calls
*/ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{ if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs);
switch (no & 0xffff) { case 0: /* branch through 0 */
arm_notify_die("branch through zero", regs,
SIGSEGV, SEGV_MAPERR, NULL, 0, 0); return 0;
/* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. There is no alignment requirement on either address; * user space does not need to know the hardware cache layout. * * r2 contains flags. It should ALWAYS be passed as ZERO until it * is defined to be something else. For now we ignore it, but may * the fires of hell burn in your belly if you break this rule. ;) * * (at a later date, we may want to allow this call to not flush * various aspects of the cache. Passing '0' will guarantee that * everything necessary gets flushed to maintain consistency in * the specified region).
*/ case NR(cacheflush): return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
case NR(usr26): if (!(elf_hwcap & HWCAP_26BIT)) break;
regs->ARM_cpsr &= ~MODE32_BIT; return regs->ARM_r0;
case NR(usr32): if (!(elf_hwcap & HWCAP_26BIT)) break;
regs->ARM_cpsr |= MODE32_BIT; return regs->ARM_r0;
case NR(set_tls):
set_tls(regs->ARM_r0); return 0;
case NR(get_tls): return current_thread_info()->tp_value[0];
default: /* Calls 9f00xx..9f07ff are defined to return -ENOSYS if not implemented, rather than raising SIGILL. This way the calling program can gracefully determine whether
a feature is supported. */ if ((no & 0xffff) <= 0x7ff) return -ENOSYS; break;
} #ifdef CONFIG_DEBUG_USER /* * experience shows that these seem to indicate that * something catastrophic has happened
*/ if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
dump_instr(KERN_ERR, regs); if (user_mode(regs)) {
__show_regs(regs);
c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
}
} #endif
arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
(void __user *)instruction_pointer(regs) -
(thumb_mode(regs) ? 2 : 4),
no, 0); return 0;
}
#ifdef CONFIG_TLS_REG_EMUL
/* * We might be running on an ARMv6+ processor which should have the TLS * register but for some reason we can't use it, or maybe an SMP system * using a pre-ARMv6 processor (there are apparently a few prototypes like * that in existence) and therefore access to that register must be * emulated.
*/
/* * A data abort trap was taken, but we did not handle the instruction. * Try to abort the user program, or panic if it was the kernel.
*/
asmlinkage void
baddataabort(int code, unsignedlong instr, struct pt_regs *regs)
{ unsignedlong addr = instruction_pointer(regs);
#ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) {
pr_err("8<--- cut here ---\n");
pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
task_pid_nr(current), current->comm, code, instr);
dump_instr(KERN_ERR, regs);
show_pte(KERN_ERR, current->mm, addr);
} #endif
if (system_state >= SYSTEM_FREEING_INITMEM) {
pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
smp_processor_id()); return SPECTRE_VULNERABLE;
}
/* * Poison the vectors page with an undefined instruction. This * instruction is chosen to be undefined for both ARM and Thumb * ISAs. The Thumb version is an undefined instruction with a * branch back to the undefined instruction.
*/ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
((u32 *)vectors_base)[i] = 0xe7fddef1;
/* * Copy the vectors, stubs and kuser helpers (in entry-armv.S) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream.
*/
copy_from_lma(vectors_base, __vectors_start, __vectors_end);
copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
kuser_init(vectors_base);
flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
} #else/* ifndef CONFIG_CPU_V7M */ void __init early_trap_init(void *vectors_base)
{ /* * on V7-M there is no need to copy the vector table to a dedicated * memory area. The address is configurable and so a table in the kernel * image can be used.
*/
} #endif
#ifndef CONFIG_ARM_LPAE /* * Normally, we rely on the logic in do_translation_fault() to update stale PMD * entries covering the vmalloc space in a task's page tables when it first * accesses the region in question. Unfortunately, this is not sufficient when * the task stack resides in the vmalloc region, as do_translation_fault() is a * C function that needs a stack to run. * * So we need to ensure that these PMD entries are up to date *before* the MM * switch. As we already have some logic in the MM switch path that takes care * of this, let's trigger it by bumping the counter every time the core vmalloc * code modifies a PMD entry in the vmalloc region. Use release semantics on * the store so that other CPUs observing the counter's new value are * guaranteed to see the updated page table entries as well.
*/ void arch_sync_kernel_mappings(unsignedlong start, unsignedlong end)
{ if (start < VMALLOC_END && end > VMALLOC_START)
atomic_inc_return_release(&init_mm.context.vmalloc_seq);
} #endif #endif
Messung V0.5
¤ Dauer der Verarbeitung: 0.26 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.