/* FPC[2] is Data Exception Code */ if ((fpc & 0x00000300) == 0) { /* bits 6 and 7 of DXC are 0 iff IEEE exception */ if (fpc & 0x8000) /* invalid fp operation */
si_code = FPE_FLTINV; elseif (fpc & 0x4000) /* div by 0 */
si_code = FPE_FLTDIV; elseif (fpc & 0x2000) /* overflow */
si_code = FPE_FLTOVF; elseif (fpc & 0x1000) /* underflow */
si_code = FPE_FLTUND; elseif (fpc & 0x0800) /* inexact */
si_code = FPE_FLTRES;
}
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
/*
 * Handle a translation-specification program exception.
 * Unconditionally panics: this exception indicates corrupted
 * translation tables and is not recoverable.
 */
static void translation_specification_exception(struct pt_regs *regs)
{
	/* May never happen. */
	panic("Translation-Specification Exception");
}
/*
 * Handle an illegal-operation program exception.
 *
 * In user mode the faulting opcode is read and classified: the s390
 * breakpoint opcode raises SIGTRAP for ptraced tasks (SIGILL otherwise),
 * a uprobes breakpoint opcode is flagged for the notifier chain, and any
 * other opcode raises SIGILL. Kernel-mode illegal ops and flagged uprobe
 * instructions are offered to the DIE_BPT notifier chain (kprobes/uprobes)
 * first; if nobody claims the event, SIGILL is delivered via do_trap().
 */
static void illegal_op(struct pt_regs *regs)
{
	int is_uprobe_insn = 0;
	u16 __user *location;
	int signal = 0;
	u16 opcode;

	location = get_trap_ip(regs);
	if (user_mode(regs)) {
		if (get_user(opcode, location))
			return;
		if (opcode == S390_BREAKPOINT_U16) {
			if (current->ptrace)
				force_sig_fault(SIGTRAP, TRAP_BRKPT, location);
			else
				signal = SIGILL;
#ifdef CONFIG_UPROBES
		} else if (opcode == UPROBE_SWBP_INSN) {
			is_uprobe_insn = 1;
#endif
		} else {
			signal = SIGILL;
		}
	}
	/*
	 * This is either an illegal op in kernel mode, or user space trapped
	 * on a uprobes illegal instruction. See if kprobes or uprobes picks
	 * it up. If not, SIGILL.
	 */
	if (is_uprobe_insn || !user_mode(regs)) {
		if (notify_die(DIE_BPT, "bpt", regs, 0, 3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
/*
 * Handle a vector-processing program exception: decode the vector
 * interrupt code (VIC, bits 8-11 of the FPC) and deliver SIGFPE with
 * the matching si_code. An unknown VIC yields si_code 0.
 */
static void vector_exception(struct pt_regs *regs)
{
	int si_code, vic;

	/* get vector interrupt code from fpc */
	save_user_fpu_regs();
	vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
	switch (vic) {
	case 1:	/* invalid vector operation */
		si_code = FPE_FLTINV;
		break;
	case 2:	/* division by zero */
		si_code = FPE_FLTDIV;
		break;
	case 3:	/* overflow */
		si_code = FPE_FLTOVF;
		break;
	case 4:	/* underflow */
		si_code = FPE_FLTUND;
		break;
	case 5:	/* inexact */
		si_code = FPE_FLTRES;
		break;
	default: /* unknown cause */
		si_code = 0;
	}
	do_trap(regs, SIGFPE, si_code, "vector exception");
}
/*
 * Handle a space-switch event: force the user PSW back to home
 * address-space mode, then deliver SIGILL via do_trap().
 */
static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
/*
 * Handle a monitor event. User-mode events are ignored. In kernel mode
 * the faulting instruction address (psw.addr minus the instruction
 * length from int_code) is looked up in the bug table: an unknown
 * address gets an extable fixup, a WARN is a no-op here (already
 * reported by report_bug), and a real BUG dies.
 */
static void monitor_event_exception(struct pt_regs *regs)
{
	if (user_mode(regs))
		return;
	switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
	case BUG_TRAP_TYPE_NONE:
		fixup_exception(regs);
		break;
	case BUG_TRAP_TYPE_WARN:
		break;
	case BUG_TRAP_TYPE_BUG:
		die(regs, "monitor event");
		break;
	}
}
/*
 * Report an invalid kernel stack pointer: dump the registers and panic.
 * There is no recovery path from this condition.
 */
void kernel_stack_invalid(struct pt_regs *regs)
{
	/*
	 * Normally regs are unpoisoned by the generic entry code, but
	 * kernel_stack_overflow() is a rare case that is called bypassing it.
	 */
	kmsan_unpoison_entry_regs(regs);
	bust_spinlocks(1);
	pr_emerg("Kernel stack pointer invalid\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Invalid kernel stack pointer, cannot continue");
}
NOKPROBE_SYMBOL(kernel_stack_invalid);
/*
 * Boot-time self test: execute a monitor call ("mc") instruction and
 * verify that it traps. When the trap is delivered, the extable entry
 * redirects execution from label 0 to label 1, skipping the "lhi" that
 * would clear val, so val stays 1. If val ends up 0 the monitor-call
 * trap machinery is broken and we panic. Only meaningful when
 * CONFIG_BUG is enabled (the handler relies on the bug table).
 */
static void __init test_monitor_call(void)
{
	int val = 1;

	if (!IS_ENABLED(CONFIG_BUG))
		return;
	asm_inline volatile(
		" mc 0,0\n"
		"0: lhi %[val],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [val] "+d" (val));
	if (!val)
		panic("Monitor call doesn't work!\n");
}
teid.val = lc->trans_exc_code;
regs->int_code = lc->pgm_int_code;
regs->int_parm_long = teid.val; /* * In case of a guest fault, short-circuit the fault handler and return. * This way the sie64a() function will return 0; fault address and * other relevant bits are saved in current->thread.gmap_teid, and * the fault number in current->thread.gmap_int_code. KVM will be * able to use this information to handle the fault.
*/ if (test_pt_regs_flag(regs, PIF_GUEST_FAULT)) {
current->thread.gmap_teid.val = regs->int_parm_long;
current->thread.gmap_int_code = regs->int_code & 0xffff; return;
}
state = irqentry_enter(regs); if (user_mode(regs)) {
update_timer_sys(); if (!cpu_has_bear()) { if (regs->last_break < 4096)
regs->last_break = 1;
}
current->thread.last_break = regs->last_break;
} if (lc->pgm_code & 0x0200) { /* transaction abort */
current->thread.trap_tdb = lc->pgm_tdb;
} if (lc->pgm_code & PGM_INT_CODE_PER) { if (user_mode(regs)) { struct per_event *ev = ¤t->thread.per_event;
set_thread_flag(TIF_PER_TRAP);
ev->address = lc->per_address;
ev->cause = lc->per_code_combined;
ev->paid = lc->per_access_id;
} else { /* PER event in kernel is kprobes */
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
do_per_trap(regs); goto out;
}
} if (!irqs_disabled_flags(regs->psw.mask))
trace_hardirqs_on();
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
trapnr = regs->int_code & PGM_INT_CODE_MASK; if (trapnr)
pgm_check_table[trapnr](regs);
out:
local_irq_disable();
irqentry_exit(regs, state);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.