if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
show_raw_backtrace(sp, loglvl, user); return;
}
printk("%sCall Trace:\n", loglvl); do {
print_ip_sym(loglvl, pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
pr_cont("\n");
}
/* * This routine abuses get_user()/put_user() to reference pointers * with at least a bit of error checking ...
*/ staticvoid show_stacktrace(struct task_struct *task, conststruct pt_regs *regs, constchar *loglvl, bool user)
{ constint field = 2 * sizeof(unsignedlong); unsignedlong stackdata; int i; unsignedlong *sp = (unsignedlong *)regs->regs[29];
printk("%sStack :", loglvl);
i = 0; while ((unsignedlong) sp & (PAGE_SIZE - 1)) { if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
printk("%s ", loglvl);
} if (i > 39) {
pr_cont(" ..."); break;
}
/* Given an address, look for it in the exception tables. */ staticconststruct exception_table_entry *search_dbe_tables(unsignedlong addr)
{ conststruct exception_table_entry *e;
e = search_extable(__start___dbe_table,
__stop___dbe_table - __start___dbe_table, addr); if (!e)
e = search_module_dbetables(addr); return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{ constint field = 2 * sizeof(unsignedlong); conststruct exception_table_entry *fixup = NULL; int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; enum ctx_state prev_state;
prev_state = exception_enter(); /* XXX For now. Fixme, this searches the wrong table ... */ if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
if (fixup)
action = MIPS_BE_FIXUP;
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL); else
mips_cm_error_report();
switch (action) { case MIPS_BE_DISCARD: goto out; case MIPS_BE_FIXUP: if (fixup) {
regs->cp0_epc = fixup->nextinsn; goto out;
} break; default: break;
}
/* * Assume it would be too dangerous to continue ...
*/
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]); if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
SIGBUS) == NOTIFY_STOP) goto out;
if (put_user(regs->regs[reg], vaddr)) return SIGSEGV;
regs->regs[reg] = 1;
return 0;
}
/* * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both * opcodes are supposed to result in coprocessor unusable exceptions if * executed on ll/sc-less processors. That's the theory. In practice a * few processors such as NEC's VR4100 throw reserved instruction exceptions * instead, so we're doing the emulation thing in both exception handlers.
*/ staticint simulate_llsc(struct pt_regs *regs, unsignedint opcode)
{ if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0); return simulate_ll(regs, opcode);
} if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0); return simulate_sc(regs, opcode);
}
return -1; /* Must be something else ... */
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 *
 * @rd: hardware register number selected by the instruction
 * @rt: GPR that receives the value
 *
 * Returns 0 on successful emulation, -1 for an unrecognized hardware
 * register (caller will raise SIGILL or try other emulations).
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
/* * Send SIGFPE according to FCSR Cause bits, which must have already * been masked against Enable bits. This is impotant as Inexact can * happen together with Overflow or Underflow, and `ptrace' can set * any bits.
*/ void force_fcr31_sig(unsignedlong fcr31, void __user *fault_addr, struct task_struct *tsk)
{ int si_code = FPE_FLTUNK;
/* If it's obviously not an FP instruction, skip it */ switch (inst.i_format.opcode) { case cop1_op: case cop1x_op: case lwc1_op: case ldc1_op: case swc1_op: case sdc1_op: break;
default: return -1;
}
/* * do_ri skipped over the instruction via compute_return_epc, undo * that for the FPU emulator.
*/
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
&fault_addr);
/* * We can't allow the emulated instruction to leave any * enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
/* Send a signal if required. */
process_fpemu_return(sig, fault_addr, fcr31);
return 0;
}
/* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsignedlong fcr31)
{ enum ctx_state prev_state; void __user *fault_addr; int sig;
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) { /* * Unimplemented operation exception. If we've got the full * software emulator on-board, let's use it... * * Force FPU to dump state into task/thread context. We're * moving a lot of data here for what is probably a single * instruction, but the alternative is to pre-decode the FP * register operands before invoking the emulator, which seems * a bit extreme for what should be an infrequent event.
*/
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
&fault_addr);
/* * We can't allow the emulated instruction to leave any * enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
} else {
sig = SIGFPE;
fault_addr = (void __user *) regs->cp0_epc;
}
/* Send a signal if required. */
process_fpemu_return(sig, fault_addr, fcr31);
out:
exception_exit(prev_state);
}
/* * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've * emulated more than some threshold number of instructions, force migration to * a "CPU" that has FP support.
*/ staticvoid mt_ase_fp_affinity(void)
{ #ifdef CONFIG_MIPS_MT_FPAFF if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) { /* * If there's no FPU present, or if the application has already * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure.
*/ if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) {
cpumask_t tmask;
if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP) return;
/* * A short test says that IRIX 5.3 sends SIGTRAP for all trap * insns, even for trap and break codes that indicate arithmetic * failures. Weird ... * But should we continue the brokenness??? --macro
*/ switch (code) { case BRK_OVERFLOW: case BRK_DIVZERO:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig_fault(SIGFPE,
code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
(void __user *) regs->cp0_epc); break; case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
force_sig(SIGTRAP); break; case BRK_MEMU: /* * This breakpoint code is used by the FPU emulator to retake * control of the CPU after executing the instruction from the * delay slot of an emulated branch. * * Terminate if exception was recognized as a delay slot return * otherwise handle as normal.
*/ if (do_dsemulret(regs)) return;
/* * There is the ancient bug in the MIPS assemblers that the break * code starts left to bit 16 instead to bit 6 in the opcode. * Gas is bug-compatible, but not always, grrr... * We handle both cases with a simple heuristics. --macro
*/ if (bcode >= (1 << 10))
bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
/* * notify the kprobe handlers, if instruction is likely to * pertain to them.
*/ switch (bcode) { case BRK_UPROBE: if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) goto out; else break; case BRK_UPROBE_XOL: if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) goto out; else break; case BRK_KPROBE_BP: if (notify_die(DIE_BREAK, "debug", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) goto out; else break; case BRK_KPROBE_SSTEPBP: if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) goto out; else break; default: break;
}
staticint enable_restore_fp_context(int msa)
{ int err, was_fpu_owner, prior_msa; bool first_fp;
/* Initialize context if it hasn't been used already */
first_fp = init_fp_ctx(current);
if (first_fp) {
preempt_disable();
err = own_fpu_inatomic(1); if (msa && !err) {
enable_msa(); /* * with MSA enabled, userspace can see MSACSR * and MSA regs, but the values in them are from * other task before current task, restore them * from saved fp/msa context
*/
write_msa_csr(current->thread.fpu.msacsr); /* * own_fpu_inatomic(1) just restore low 64bit, * fix the high 64bit
*/
init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
preempt_enable(); return err;
}
/* * This task has formerly used the FP context. * * If this thread has no live MSA vector context then we can simply * restore the scalar FP context. If it has live MSA vector context * (that is, it has or may have used MSA since last performing a * function call) then we'll need to restore the vector context. This * applies even if we're currently only executing a scalar FP * instruction. This is because if we were to later execute an MSA * instruction then we'd either have to: * * - Restore the vector context & clobber any registers modified by * scalar FP instructions between now & then. * * or * * - Not restore the vector context & lose the most significant bits * of all vector registers. * * Neither of those options is acceptable. We cannot restore the least * significant bits of the registers now & only restore the most * significant bits later because the most significant bits of any * vector registers whose aliased FP register is modified now will have * been zeroed. We'd have no way to know that when restoring the vector * context & thus may load an outdated value for the most significant * bits of a vector register.
*/ if (!msa && !thread_msa_context_live()) return own_fpu(1);
/* * This task is using or has previously used MSA. Thus we require * that Status.FR == 1.
*/
preempt_disable();
was_fpu_owner = is_fpu_owner();
err = own_fpu_inatomic(0); if (err) goto out;
/* * If this is the first time that the task is using MSA and it has * previously used scalar FP in this time slice then we already nave * FP context which we shouldn't clobber. We do however need to clear * the upper 64b of each vector register so that this task has no * opportunity to see data left behind by another.
*/
prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); if (!prior_msa && was_fpu_owner) {
init_msa_upper();
goto out;
}
if (!prior_msa) { /* * Restore the least significant 64b of each vector register * from the existing scalar FP context.
*/
_restore_fp(current);
/* * The task has not formerly used MSA, so clear the upper 64b * of each vector register such that it cannot see data left * behind by another task.
*/
init_msa_upper();
} else { /* We need to restore the vector context. */
restore_msa(current);
/* Restore the scalar FP control & status register */ if (!was_fpu_owner)
write_32bit_cp1_register(CP1_STATUS,
current->thread.fpu.fcr31);
}
#ifdef CONFIG_MIPS_FP_SUPPORT case 3: /* * The COP3 opcode space and consequently the CP0.Status.CU3 * bit and the CP0.Cause.CE=3 encoding have been removed as * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs * up the space has been reused for COP1X instructions, that * are enabled by the CP0.Status.CU1 bit and consequently * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable * exceptions. Some FPU-less processors that implement one * of these ISAs however use this code erroneously for COP1X * instructions. Therefore we redirect this trap to the FP * emulator too.
*/ if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
force_sig(SIGILL); break;
}
fallthrough; case 1: { void __user *fault_addr; unsignedlong fcr31; int err, sig;
err = enable_restore_fp_context(0);
if (raw_cpu_has_fpu && !err) break;
sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
&fault_addr);
/* * We can't allow the emulated instruction to leave * any enabled Cause bits set in $fcr31.
*/
fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */ if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
mt_ase_fp_affinity();
break;
} #else/* CONFIG_MIPS_FP_SUPPORT */ case 1: case 3:
force_sig(SIGILL); break; #endif/* CONFIG_MIPS_FP_SUPPORT */
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); break;
}
/*
 * Watch exception handler.  Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}
/* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple " "matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{ int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT; switch (subcode) { case 0:
printk(KERN_DEBUG "Thread Underflow\n"); break; case 1:
printk(KERN_DEBUG "Thread Overflow\n"); break; case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); break; case 3:
printk(KERN_DEBUG "Gating Storage Exception\n"); break; case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n"); break; case 5:
printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); break; default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode); break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
/* * Some MIPS CPUs can enable/disable for cache parity detection, but do * it different ways.
*/ staticinline __init void parity_protection_init(void)
{ #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000
if (mips_cm_revision() >= CM_REV_CM3) {
ulong gcr_ectl, cp0_ectl;
/* * With CM3 systems we need to ensure that the L1 & L2 * parity enables are set to the same value, since this * is presumed by the hardware engineers. * * If the user disabled either of L1 or L2 ECC checking, * disable both.
*/
l1parity &= l2parity;
l2parity &= l1parity;
switch (current_cpu_type()) { case CPU_24K: case CPU_34K: case CPU_74K: case CPU_1004K: case CPU_1074K: case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: case CPU_QEMU_GENERIC: case CPU_P6600:
{ unsignedlong errctl; unsignedint l1parity_present, l2parity_present;
case CPU_5KC: case CPU_5KE: case CPU_LOONGSON32:
write_c0_errctl(0x80000000);
back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */
pr_info("Cache parity protection %s\n",
str_enabled_disabled(read_c0_errctl() & 0x80000000)); break; case CPU_20KC: case CPU_25KF: /* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for " "MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE); break; default: break;
}
}
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
switch (exccode) { case 0x08: /* Undocumented exception, will trigger on certain * also-undocumented instructions accessible from userspace. * Processor state is not otherwise corrupted, but currently * we don't know how to proceed. Maybe there is some * undocumented control flag to enable the instructions?
*/
force_sig(SIGILL); break;
default: /* None of the other exceptions, documented or not, have * further details given; none are encountered in the wild * either. Panic in case some of them turn out to be fatal.
*/
show_regs(regs);
panic("Unhandled Loongson exception - GSCause = %08x", diag1);
}
exception_exit(prev_state);
}
/* * SDBBP EJTAG debug exception handler. * We skip the instruction and return to the next instruction.
*/ void ejtag_exception_handler(struct pt_regs *regs)
{ constint field = 2 * sizeof(unsignedlong); unsignedlong depc, old_epc, old_ra; unsignedint debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); if (debug & 0x80000000) { /* * In branch delay slot. * We cheat a little bit here and use EPC to calculate the * debug return address (DEPC). EPC is restored after the * calculation.
*/
old_epc = regs->cp0_epc;
old_ra = regs->regs[31];
regs->cp0_epc = depc;
compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
} else
depc += 4;
write_c0_depc(depc);
/*
 * Reserve the physical memory used for an exception vector so the page
 * allocator never hands it out.
 *
 * Reserving exception space on CPUs other than CPU0 is too late, since
 * memblock is unavailable by the time the APs come up, so only the boot
 * CPU performs the reservation.
 */
void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
	if (smp_processor_id() == 0)
		memblock_reserve(addr, size);
}
#ifdef CONFIG_CPU_MICROMIPS /* * Only the TLB handlers are cache aligned with an even * address. All other handlers are on an odd address and * require no modification. Otherwise, MIPS32 mode will * be entered when handling any TLB exceptions. That * would be bad...since we must stay in microMIPS mode.
*/ if (!(handler & 0x1))
handler |= 1; #endif
old_handler = xchg(&exception_handlers[n], handler);
b = (unsignedchar *)(ebase + 0x200 + n*VECTORSPACING);
if (cpu_has_veic) { if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, 0);
} elseif (cpu_has_vint) { /* SRSMap is only defined if shadow sets are implemented */ if (srssets > 1)
change_c0_srsmap(0xf << n*4, 0 << n*4);
}
if (handler_len > VECTORSPACING) { /* * Sigh... panicing won't help as the console * is probably not configured :(
*/
panic("VECTORSPACING too small");
}
/* configure STATUS register */ staticvoid configure_status(void)
{ /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work.
*/ unsignedint status_set = ST0_KERNEL_CUMASK; #ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX; if (cpu_has_dsp)
status_set |= ST0_MX;
if (!noulri && cpu_has_userlocal)
hwrena |= MIPS_HWRENA_ULR;
if (hwrena)
write_c0_hwrena(hwrena);
}
staticvoid configure_exception_vector(void)
{ if (cpu_has_mips_r2_r6) { unsignedlong sr = set_c0_status(ST0_BEV); /* If available, use WG to set top bits of EBASE */ if (cpu_has_ebase_wg) { #ifdef CONFIG_64BIT
write_c0_ebase_64(ebase | MIPS_EBASE_WG); #else
write_c0_ebase(ebase | MIPS_EBASE_WG); #endif
}
write_c0_ebase(ebase);
write_c0_status(sr);
} if (cpu_has_veic || cpu_has_vint) { /* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
} if (cpu_has_divec) { if (cpu_has_mipsmt) { unsignedint vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
}
void per_cpu_trap_init(bool is_boot_cpu)
{ unsignedint cpu = smp_processor_id();
configure_status();
configure_hwrena();
configure_exception_vector();
/* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: * * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt * o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/ if (cpu_has_mips_r2_r6) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; if (!cp0_fdc_irq)
cp0_fdc_irq = -1;
staticconstchar panic_null_cerr[] = "Trying to set NULL cache error exception handler\n";
/* * Install uncached CPU exception handler. * This is suitable only for the cache error exception which is the only * exception handler that is being run uncached.
*/ void set_uncached_handler(unsignedlong offset, void *addr, unsignedlong size)
{ unsignedlong uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase));
ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size)); if (!ebase_pa)
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
__func__, vec_size, 1 << fls(vec_size));
/* * Try to ensure ebase resides in KSeg0 if possible. * * It shouldn't generally be in XKPhys on MIPS64 to avoid * hitting a poorly defined exception base for Cache Errors. * The allocation is likely to be in the low 512MB of physical, * in which case we should be able to convert to KSeg0. * * EVA is special though as it allows segments to be rearranged * and to become uncached during cache error handling.
*/ if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000)
ebase = CKSEG0ADDR(ebase_pa); else
ebase = (unsignedlong)phys_to_virt(ebase_pa); if (ebase_pa >= 0x20000000)
pr_warn("ebase(%pa) should better be in KSeg0",
&ebase_pa);
}
if (cpu_has_mmips) { unsignedint config3 = read_c0_config3();
if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
write_c0_config3(config3 | MIPS_CONF3_ISA_OE); else
write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
}
if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
memblock_set_bottom_up(false);
/* * Copy the generic exception handlers to their final destination. * This will be overridden later as suitable for a particular * configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/* * Setup default vectors
*/ for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/* * Copy the EJTAG debug exception vector handler code to its final * destination.
*/ if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/* * Only some CPUs have the watch exceptions.
*/ if (cpu_has_watch)
set_except_vector(EXCCODE_WATCH, handle_watch);
/* * Initialise interrupt handlers
*/ if (cpu_has_veic || cpu_has_vint) { int nvec = cpu_has_veic ? 64 : 8; for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
} elseif (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/* * Some CPUs can enable/disable for cache parity detection, but does * it different ways.
*/
parity_protection_init();
/* * The Data Bus Errors / Instruction Bus Errors are signaled * by external hardware. Therefore these two exceptions * may have board specific handlers.
*/ if (board_be_init)
board_be_init();
/*
 * NOTE(review): the German text that followed here is website-footer
 * residue from the page this source was extracted from, not kernel code.
 * Translation: "The information on this web page was compiled carefully
 * to the best of our knowledge.  However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Remark: the colored syntax display and the measurement are still
 * experimental."  This block should be removed from the file.
 */