/*
 *  linux/arch/m68k/kernel/traps.c
 *
 *  Copyright (C) 1993, 1994 by Hamish Macdonald
 *
 *  68040 fixes by Michael Rausch
 *  68040 fixes by Martin Apel
 *  68040 fixes and writeback by Richard Zidlicky
 *  68060 fixes by Roman Hodek
 *  68060 fixes by Jesper Skov
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive
 *  for more details.
 */
staticinlineint do_040writeback1(unsignedshort wbs, unsignedlong wba, unsignedlong wbd)
{ int res = 0;
set_fc(wbs);
switch (wbs & WBSIZ_040) { case BA_SIZE_BYTE:
res = put_user(wbd & 0xff, (char __user *)wba); break; case BA_SIZE_WORD:
res = put_user(wbd & 0xffff, (short __user *)wba); break; case BA_SIZE_LONG:
res = put_user(wbd, (int __user *)wba); break;
}
set_fc(USER_DATA);
pr_debug("do_040writeback1, res=%d\n", res);
return res;
}
/* after an exception in a writeback the stack frame corresponding * to that exception is discarded, set a few bits in the old frame * to simulate what it should look like
*/ staticinlinevoid fix_xframe040(struct frame *fp, unsignedlong wba, unsignedshort wbs)
{
fp->un.fmt7.faddr = wba;
fp->un.fmt7.ssw = wbs & 0xff; if (wba != current->thread.faddr)
fp->un.fmt7.ssw |= MA_040;
}
/*
 * do_040writebacks() - complete the pending 68040 writebacks recorded
 * in the format-7 exception frame (wb2 and wb3; the wb1 check below is
 * compiled out with #if 0).
 *
 * NOTE(review): this chunk is truncated mid-function -- the rest of the
 * wb3 branch and the function's closing brace are outside the visible
 * source, so the code is annotated but left byte-identical.  The
 * run-together tokens ("staticinlinevoid") are extraction damage.
 */
staticinlinevoid do_040writebacks(struct frame *fp)
{ int res = 0; #if 0 if (fp->un.fmt7.wb1s & WBV_040)
pr_err("access_error040: cannot handle 1st writeback. oops.\n"); #endif
/* wb2: only replay it when valid and not of the WBTT type */
if ((fp->un.fmt7.wb2s & WBV_040) &&
!(fp->un.fmt7.wb2s & WBTT_040)) {
res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
fp->un.fmt7.wb2d); if (res)
fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s); else
fp->un.fmt7.wb2s = 0;
}
/* do the 2nd wb only if the first one was successful (except for a kernel wb) */ if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
fp->un.fmt7.wb3d); if (res)
{
fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
/* * called from sigreturn(), must ensure userspace code didn't * manipulate exception frame to circumvent protection, then complete * pending writebacks * we just clear TM2 to turn it into a userspace access
 */
/*
 * NOTE(review): truncated -- the call that actually completes the
 * pending writebacks and the function's closing brace lie outside the
 * visible chunk; code left byte-identical.
 */
asmlinkage void berr_040cleanup(struct frame *fp)
{
/* clearing bit 2 in the writeback status words downgrades any pending
 * writeback to a user-space access before it is replayed */
fp->un.fmt7.wb2s &= ~4;
fp->un.fmt7.wb3s &= ~4;
/*
 * NOTE(review): orphaned fragment -- this is the middle of the 68040
 * access-error handler (it references ssw/addr/mmusr/errorcode and a
 * format-7 frame); the enclosing function header and tail are not in
 * this chunk, so the code is annotated but left byte-identical.
 */
/* * The MMU status has to be determined AFTER the address * has been corrected if there was a misaligned access (MA).
 */ if (ssw & MA_040)
addr = (addr + 7) & -8;
/* MMU error, get the MMUSR info for this access */
mmusr = probe040(!(ssw & RW_040), addr, ssw);
pr_debug("mmusr = %lx\n", mmusr);
errorcode = 1; if (!(mmusr & MMU_R_040)) { /* clear the invalid atc entry */
__flush_tlb040_one(addr);
errorcode = 0;
}
/* despite what documentation seems to say, RMW
 * accesses have always both the LK and RW bits set */ if (!(ssw & RW_040) || (ssw & LK_040))
errorcode |= 2;
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
pr_debug("do_page_fault() !=0\n"); if (user_mode(&fp->ptregs)){ /* delay writebacks after signal delivery */
pr_debug(".. was usermode - return\n"); return;
} /* disable writeback into user space from kernel * (if do_page_fault didn't fix the mapping, * the writeback won't do good)
*/
disable_wb:
pr_debug(".. disabling wb2\n"); if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040; if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
fp->un.fmt7.wb3s &= ~WBV_040;
}
} else { /* In case of a bus error we either kill the process or expect * the kernel to catch the fault, which then is also responsible * for cleaning up the mess.
*/
current->thread.signo = SIGBUS;
current->thread.faddr = fp->un.fmt7.faddr; if (send_fault_sig(&fp->ptregs) >= 0)
pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
fp->un.fmt7.faddr); goto disable_wb;
}
/*
 * NOTE(review): orphaned fragment -- Sun-3 bus-error handling (note the
 * SUN3_BUSERR_* flags and the mmu_emu_handle_fault() calls); the
 * enclosing function header is not in this chunk, so the code is
 * annotated but left byte-identical.
 */
if (ssw & (FC | FB))
pr_debug("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF)
pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
str_read_write(ssw & RW),
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
/* * Check if this page should be demand-mapped. This needs to go before * the testing for a bad kernel-space access (demand-mapping applies * to kernel accesses too).
*/
if ((ssw & DF)
&& (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) { if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0)) return;
}
/* Check for kernel-space pagefault (BAD). */ if (fp->ptregs.sr & PS_S) { /* kernel fault must be a data fault to user space */ if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) { // try checking the kernel mappings before surrender if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1)) return; /* instruction fault or kernel data fault! */ if (ssw & (FC | FB))
pr_err("Instruction fault at %#010lx\n",
fp->ptregs.pc); if (ssw & DF) { /* was this fault incurred testing bus mappings? */ if((fp->ptregs.pc >= (unsignedlong)&_sun3_map_test_start) &&
(fp->ptregs.pc <= (unsignedlong)&_sun3_map_test_end)) {
send_fault_sig(&fp->ptregs); return;
}
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
str_read_write(ssw & RW),
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
}
pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops", &fp->ptregs,0);
force_sig(SIGKILL); return;
}
} else { /* user fault */ if (!(ssw & (FC | FB)) && !(ssw & DF)) /* not an instruction fault or data fault! BAD */
panic ("USER BUSERR w/o instruction or data fault");
}
/*
 * NOTE(review): orphaned fragment -- appears to be the tail of the
 * 68020/030 bus-error handler (PMMU ploadw/ploadr/ptestr inline asm,
 * MMU_I/MMU_B status bits); the function header is not in this chunk,
 * so the code is annotated but left byte-identical.  The run-together
 * tokens ("asmvolatile", "elseif", "unsignedlong") are extraction
 * damage -- whole-file reformatting is needed before this can compile.
 */
/* First handle the data fault, if any. */ if (ssw & DF) {
addr = fp->un.fmtb.daddr;
// errorcode bit 0: 0 -> no page 1 -> protection fault // errorcode bit 1: 0 -> read fault 1 -> write fault
/* setup an ATC entry for the access about to be retried */ if (!(ssw & RW) || (ssw & RM)) asmvolatile ("ploadw %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw)); else asmvolatile ("ploadr %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
}
/* Now handle the instruction fault. */
if (!(ssw & (FC|FB))) return;
if (fp->ptregs.sr & PS_S) {
pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
buserr:
pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops",&fp->ptregs,0);
force_sig(SIGKILL); return;
}
/* get the fault address */ if (fp->ptregs.format == 10)
addr = fp->ptregs.pc + 4; else
addr = fp->un.fmtb.baddr; if (ssw & FC)
addr -= 2;
if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0) /* Insn fault on same page as data fault. But we
should still create the ATC entry. */ goto create_atc_entry;
#ifdef DEBUG asmvolatile ("ptestr #1,%2@,#7,%0\n\t" "pmove %%psr,%1"
: "=a&" (desc), "=m" (temp)
: "a" (addr));
pr_debug("mmusr is %#x for addr %#lx in task %p\n",
temp, addr, current);
pr_debug("descriptor address is 0x%p, contents %#lx\n",
__va(desc), *(unsignedlong *)__va(desc)); #else asmvolatile ("ptestr #1,%1@,#7\n\t" "pmove %%psr,%0"
: "=m" (temp) : "a" (addr)); #endif
mmusr = temp; if (mmusr & MMU_I)
do_page_fault (&fp->ptregs, addr, 0); elseif (mmusr & (MMU_B|MMU_L|MMU_S)) {
pr_err("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
pr_debug("Unknown SIGSEGV - 2\n");
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV); return;
}
create_atc_entry: /* setup an ATC entry for the access about to be retried */ asmvolatile ("ploadr #2,%0@" : /* no outputs */
: "a" (addr));
} #endif/* CPU_M68020_OR_M68030 */ #endif/* !CONFIG_SUN3 */
/*
 * buserr_c() - C entry point for bus-error exceptions.
 *
 * NOTE(review): truncated -- only the prologue and the ColdFire
 * fault-status dispatch are visible in this chunk; the remainder of
 * the function lies outside it, so the code is left byte-identical.
 */
asmlinkage void buserr_c(struct frame *fp)
{ /* Only set esp0 if coming from user mode */ if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsignedlong) fp;
pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);
/* ColdFire packs a fault-status code into the vector word; the fs
 * values listed below are routed to the access-error handler */
#ifdefined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) if (CPU_IS_COLDFIRE) { unsignedint fs;
fs = (fp->ptregs.vector & 0x3) |
((fp->ptregs.vector & 0xc00) >> 8); switch (fs) { case 0x5: case 0x6: case 0x7: case 0x9: case 0xa: case 0xd: case 0xe: case 0xf:
access_errorcf(fs, fp); return; default: break;
}
} #endif/* CONFIG_COLDFIRE && CONFIG_MMU */
/*
 * NOTE(review): orphaned fragment -- apparently the tail of a call-trace
 * printer (walks the kernel stack and prints every word that is a
 * kernel text address); the function header declaring
 * loglvl/stack/addr/endstack/i is not in this chunk, so the code is
 * left byte-identical.
 */
printk("%sCall Trace:", loglvl);
addr = (unsignedlong)stack + THREAD_SIZE - 1;
endstack = (unsignedlong *)(addr & -THREAD_SIZE);
i = 0; while (stack + 1 <= endstack) {
addr = *stack++; /* * If the address is either in the text segment of the * kernel, or in the region which contains vmalloc'ed * memory, it *may* be the address of a calling * routine; if so, print it so that someone tracing * down the cause of the crash will be able to figure * out the call path that was taken.
*/ if (__kernel_text_address(addr)) { #ifndef CONFIG_KALLSYMS if (i % 5 == 0)
pr_cont("\n "); #endif
pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
}
pr_cont("\n");
}
/*
 * NOTE(review): orphaned fragment -- apparently the tail of a stack-dump
 * routine (prints up to kstack_depth_to_print words, eight per line,
 * then chains into show_trace()); the header declaring
 * p/stack/endstack/i/loglvl is not in this chunk, code left
 * byte-identical.
 */
printk("%sStack from %08lx:", loglvl, (unsignedlong)stack);
p = stack; for (i = 0; i < kstack_depth_to_print; i++) { if (p + 1 > endstack) break; if (i % 8 == 0)
pr_cont("\n ");
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
show_trace(stack, loglvl);
}
/* * The vector number returned in the frame pointer may also contain * the "fs" (Fault Status) bits on ColdFire. These are in the bottom * 2 bits, and upper 2 bits. So we need to mask out the real vector * number before using it in comparisons. You don't need to do this on * real 68k parts, but it won't hurt either.
 */
/*
 * NOTE(review): orphaned fragment -- the ColdFire comment above appears
 * misplaced relative to the code below, which is the tail of a fault
 * reporter (pipe-stage C/B instruction faults and a data fault print)
 * followed by what looks like the end of a bad-trap handler ("BAD
 * KERNEL TRAP").  The headers are not in this chunk; code left
 * byte-identical.
 */
if (ssw & RC)
pr_err("Pipe stage C instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); if (ssw & RB)
pr_err("Pipe stage B instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 4 : fp->un.fmtb.baddr); if (ssw & DF)
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
str_read_write(ssw & RW),
fp->un.fmtb.daddr, space_names[ssw & DFC],
fp->ptregs.pc);
}
pr_err("Current process id is %d\n", task_pid_nr(current));
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
asmlinkage void trap_c(struct frame *fp)
{ int sig, si_code; void __user *addr; int vector = (fp->ptregs.vector >> 2) & 0xff;
if (fp->ptregs.sr & PS_S) { if (vector == VEC_TRACE) { /* traced a trapping instruction on a 68020/30, * real exception will be executed afterwards.
*/ return;
} #ifdef CONFIG_MMU if (fixup_exception(&fp->ptregs)) return; #endif
bad_super_trap(fp); return;
}
/* send the appropriate signal to the user program */ switch (vector) { case VEC_ADDRERR:
si_code = BUS_ADRALN;
sig = SIGBUS; break; case VEC_ILLEGAL: case VEC_LINE10: case VEC_LINE11:
si_code = ILL_ILLOPC;
sig = SIGILL; break; case VEC_PRIV:
si_code = ILL_PRVOPC;
sig = SIGILL; break; case VEC_COPROC:
si_code = ILL_COPROC;
sig = SIGILL; break; case VEC_TRAP1: case VEC_TRAP2: case VEC_TRAP3: case VEC_TRAP4: case VEC_TRAP5: case VEC_TRAP6: case VEC_TRAP7: case VEC_TRAP8: case VEC_TRAP9: case VEC_TRAP10: case VEC_TRAP11: case VEC_TRAP12: case VEC_TRAP13: case VEC_TRAP14:
si_code = ILL_ILLTRP;
sig = SIGILL; break; case VEC_FPBRUC: case VEC_FPOE: case VEC_FPNAN:
si_code = FPE_FLTINV;
sig = SIGFPE; break; case VEC_FPIR:
si_code = FPE_FLTRES;
sig = SIGFPE; break; case VEC_FPDIVZ:
si_code = FPE_FLTDIV;
sig = SIGFPE; break; case VEC_FPUNDER:
si_code = FPE_FLTUND;
sig = SIGFPE; break; case VEC_FPOVER:
si_code = FPE_FLTOVF;
sig = SIGFPE; break; case VEC_ZERODIV:
si_code = FPE_INTDIV;
sig = SIGFPE; break; case VEC_CHK: case VEC_TRAP:
si_code = FPE_INTOVF;
sig = SIGFPE; break; case VEC_TRACE: /* ptrace single step */
si_code = TRAP_TRACE;
sig = SIGTRAP; break; case VEC_TRAP15: /* breakpoint */
si_code = TRAP_BRKPT;
sig = SIGTRAP; break; default:
si_code = ILL_ILLOPC;
sig = SIGILL; break;
} switch (fp->ptregs.format) { default:
addr = (void __user *) fp->ptregs.pc; break; case 2:
addr = (void __user *) fp->un.fmt2.iaddr; break; case 7:
addr = (void __user *) fp->un.fmt7.effaddr; break; case 9:
addr = (void __user *) fp->un.fmt9.iaddr; break; case 10:
addr = (void __user *) fp->un.fmta.daddr; break; case 11:
addr = (void __user*) fp->un.fmtb.daddr; break;
}
force_sig_fault(sig, si_code, addr);
}
/*
 * die_if_kernel() - report a fatal fault when it occurred in kernel
 * mode; a frame with the supervisor bit clear is simply ignored.
 *
 * NOTE(review): truncated -- everything after the user-mode guard lies
 * outside the visible chunk; code left byte-identical.
 */
void die_if_kernel (char *str, struct pt_regs *fp, int nr)
{ if (!(fp->sr & PS_S)) return;
/*
 * This function is called if an error occurs while accessing
 * user-space from the fpsp040 code; the offending process is
 * terminated with SIGSEGV.
 */
asmlinkage void fpsp040_die(void)
{
force_exit_sig(SIGSEGV);
}
/*
 * NOTE(review): the German disclaimer below is web-scrape residue, not
 * part of the original source; kept as a comment so the file remains
 * compilable.  (Translation: "The information on this web page was
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, nor correctness, nor quality of the provided
 * information is guaranteed.  Remark: the coloured syntax display and
 * the measurement are still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */