staticconstchar *humanize_exc_name(unsignedint ecode, unsignedint esubcode)
{ /* * LoongArch users and developers are probably more familiar with * those names found in the ISA manual, so we are going to print out * the latter. This will require some mapping.
*/ switch (ecode) { case EXCCODE_RSV: return"INT"; case EXCCODE_TLBL: return"PIL"; case EXCCODE_TLBS: return"PIS"; case EXCCODE_TLBI: return"PIF"; case EXCCODE_TLBM: return"PME"; case EXCCODE_TLBNR: return"PNR"; case EXCCODE_TLBNX: return"PNX"; case EXCCODE_TLBPE: return"PPI"; case EXCCODE_ADE: switch (esubcode) { case EXSUBCODE_ADEF: return"ADEF"; case EXSUBCODE_ADEM: return"ADEM";
} break; case EXCCODE_ALE: return"ALE"; case EXCCODE_BCE: return"BCE"; case EXCCODE_SYS: return"SYS"; case EXCCODE_BP: return"BRK"; case EXCCODE_INE: return"INE"; case EXCCODE_IPE: return"IPE"; case EXCCODE_FPDIS: return"FPD"; case EXCCODE_LSXDIS: return"SXD"; case EXCCODE_LASXDIS: return"ASXD"; case EXCCODE_FPE: switch (esubcode) { case EXCSUBCODE_FPE: return"FPE"; case EXCSUBCODE_VFPE: return"VFPE";
} break; case EXCCODE_WATCH: switch (esubcode) { case EXCSUBCODE_WPEF: return"WPEF"; case EXCSUBCODE_WPEM: return"WPEM";
} break; case EXCCODE_BTDIS: return"BTD"; case EXCCODE_BTE: return"BTE"; case EXCCODE_GSPR: return"GSPR"; case EXCCODE_HVC: return"HVC"; case EXCCODE_GCM: switch (esubcode) { case EXCSUBCODE_GCSC: return"GCSC"; case EXCSUBCODE_GCHC: return"GCHC";
} break; /* * The manual did not mention the EXCCODE_SE case, but print out it * nevertheless.
*/ case EXCCODE_SE: return"SE";
}
/* * Send SIGFPE according to FCSR Cause bits, which must have already * been masked against Enable bits. This is important as Inexact can * happen together with Overflow or Underflow, and `ptrace' can set * any bits.
*/ staticvoid force_fcsr_sig(unsignedlong fcsr, void __user *fault_addr, struct task_struct *tsk)
{ int si_code = FPE_FLTUNK;
/* * Did we catch a fault trying to load an instruction?
*/ if (regs->csr_badvaddr == regs->csr_era) goto sigbus; if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (!unaligned_enabled) goto sigbus; if (!no_unaligned_warning)
show_registers(regs);
case BUG_TRAP_TYPE_WARN: /* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE; break;
default: if (!fixup_exception(regs))
die("Oops - BUG", regs);
}
}
/*
 * Bounds-check-error (BCE) exception handler.
 *
 * Decodes the faulting ASRT/bounds-checked load/store instruction to
 * recover the checked address and the violated bound, then delivers
 * SIGSEGV (with bounds info where the instruction could be read).
 *
 * Fix: the mangled original had `goto out` / `goto bad_era` but the
 * `out:` label, the signal delivery after the decode switch, and the
 * irqentry_exit() path were missing, so the function could not compile
 * and never exited the exception context.  Restored below —
 * NOTE(review): restored from the upstream structure; confirm against
 * the canonical source.
 */
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	bool pie = regs_irqs_disabled(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (!pie)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower,
			 (void __user *)upper);

out:
	if (!pie)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}
/*
 * Breakpoint (BRK instruction) exception handler.
 *
 * First offers the break code to the kgdb/kprobe/uprobe machinery;
 * if none of them consumes it, translates well-known codes (BUG,
 * DIVZERO, OVERFLOW) into the matching signal for user space or a
 * kernel die().
 *
 * Fix: the mangled original had `goto out` / `goto out_sigsegv` but
 * both labels and the irqentry_exit() path were missing, so the
 * function could not compile and never left the exception context.
 * Restored below — NOTE(review): restored from the upstream
 * structure; confirm against the canonical source.
 */
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	bool pie = regs_irqs_disabled(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (!pie)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (!pie)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	/* Could not even read the BRK instruction word. */
	force_sig(SIGSEGV);
	goto out;
}
#ifndef CONFIG_HAVE_HW_BREAKPOINT
pr_warn("Hardware watch point handler not implemented!\n"); #else if (kgdb_breakpoint_handler(regs)) goto out;
if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) { int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1); unsignedlong pc = instruction_pointer(regs); union loongarch_instruction *ip = (union loongarch_instruction *)pc;
if (llbit) { /* * When the ll-sc combo is encountered, it is regarded as an single * instruction. So don't clear llbit and reset CSR.FWPS.Skip until * the llsc execution is completed.
*/
csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL); goto out;
}
if (pc == current->thread.single_step) { /* * Certain insns are occasionally not skipped when CSR.FWPS.Skip is * set, such as fld.d/fst.d. So singlestep needs to compare whether * the csr_era is equal to the value of singlestep which last time set.
*/ if (!is_self_loop_ins(ip, regs)) { /* * Check if the given instruction the target pc is equal to the * current pc, If yes, then we should not set the CSR.FWPS.SKIP * bit to break the original instruction stream.
*/
csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS); goto out;
}
}
} else {
breakpoint_handler(regs);
watchpoint_handler(regs);
}
/*
 * Make the FP unit usable by the current task: allocate a fresh FP
 * context on first use, otherwise reclaim ownership of the hardware
 * registers if another task holds them.
 */
static void init_restore_fp(void)
{
	if (used_math()) {
		/* Task has used FP before: reclaim the hardware if needed. */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	} else {
		/* First ever FP use by this task. */
		init_fpu();
	}

	BUG_ON(!is_fp_enabled());
}
/*
 * Make LSX (128-bit SIMD) usable by the current task: enable the unit,
 * set up a fresh context on first use, otherwise restore whatever part
 * of the context is not already live in the hardware registers.
 *
 * Fix: the mangled original never closed the function (unbalanced
 * braces); restored the closing brace plus the enabled-state sanity
 * check that the sibling init_restore_fp()/init_restore_lbt() perform —
 * NOTE(review): confirm the BUG_ON against the canonical source.
 */
static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				/* Scalar FP half is live; only the upper lanes need restoring. */
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	BUG_ON(!is_lsx_enabled());
}
/*
 * Make LBT (binary translation extension) usable by the current task:
 * initialize a fresh context on first use, otherwise reclaim ownership
 * of the hardware state if another task holds it.
 */
static void init_restore_lbt(void)
{
	if (thread_lbt_context_live()) {
		/* This task has formerly used the LBT context */
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	} else {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	}

	BUG_ON(!is_lbt_enabled());
}
/*
 * BTD (Binary Translation Disable) exception handler: lazily enables
 * and restores the LBT context on first use, or delivers SIGILL when
 * the CPU has no LBT support.
 *
 * Fix: the mangled original had `goto out` but no `out:` label, no
 * irqentry_exit() and no closing brace; the lazy-restore call the
 * BUG_ON guards was also lost.  Restored below — NOTE(review):
 * restored from the upstream structure; confirm against the canonical
 * source.
 */
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	bool pie = regs_irqs_disabled(regs);
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (!pie)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (!pie)
		local_irq_disable();

	irqentry_exit(regs, state);
}
local_irq_enable(); /* * Game over - no way to handle this if it ever occurs. Most probably * caused by a fatal error after another hardware/software error.
*/
pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
read_csr_excode(), current->pid, current->comm);
die_if_kernel("do_reserved exception", regs);
force_sig(SIGUNUSED);
local_irq_disable();
irqentry_exit(regs, state);
}
/*
 * Cache error exception handler: dump the machine-error control and
 * error-address CSRs, then halt.  No recovery is attempted.
 */
asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}
staticconstchar panic_null_cerr[] = "Trying to set NULL cache error exception handler\n";
/* * Install uncached CPU exception handler. * This is suitable only for the cache error exception which is the only * exception handler that is being run uncached.
*/ void set_merr_handler(unsignedlong offset, void *addr, unsignedlong size)
{ unsignedlong uncached_eentry = TO_UNCACHE(__pa(eentry));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.