#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace	0
#endif /* CONFIG_SMP */
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
unsigned long panic_print;
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
#ifdef CONFIG_SYSCTL
/*
 * Taint values can only be increased.
 * This means we can safely use a temporary.
 */
static int proc_taint(const struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long tmptaint = get_taint();
	int err;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;
t = *table;
t.data = &tmptaint;
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	if (write) {
		int i;
		/*
		 * If we are relying on panic_on_taint not producing
		 * false positives due to userspace input, bail out
		 * before setting the requested taint flags.
		 */
		if (panic_on_taint_nousertaint && (tmptaint & panic_on_taint))
			return -EINVAL;

		/*
		 * Poor man's atomic or. Not worth adding a primitive
		 * to everyone's atomic.h for this.
		 */
		for (i = 0; i < TAINT_FLAGS_COUNT; i++)
			if ((1UL << i) & tmptaint)
				add_taint(i, LOCKDEP_STILL_OK);
}
return err;
}
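
/*
 * Illustrative usage note (not kernel code): since proc_taint() only ever
 * calls add_taint() and never clears bits, writes through the sysctl can
 * widen the taint mask but never narrow it:
 *
 *	# echo 512 > /proc/sys/kernel/tainted	(512 == 1 << TAINT_WARN)
 *	# echo 0 > /proc/sys/kernel/tainted	(accepted, clears nothing)
 *	# cat /proc/sys/kernel/tainted
 *	512
 */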
static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
pr_info_once("Kernel: 'panic_print' sysctl interface will be obsoleted by both 'panic_sys_info' and 'panic_console_replay'\n"); return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
/* The format is "panic_sys_info=tasks,mem,locks,ftrace,..." */
static int __init setup_panic_sys_info(char *buf)
{
	/* There is no risk of race in kernel boot phase */
	panic_print = sys_info_parse_param(buf);
	return 1;
}
__setup("panic_sys_info=", setup_panic_sys_info);
/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
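
/*
 * Sketch of a panic_blink implementation (my_board_led_set() is a
 * hypothetical helper, not a real API): a driver can register a hook so
 * the panic loops below blink an LED while the machine spins:
 *
 *	static long my_panic_blink(int state)
 *	{
 *		my_board_led_set(state);	// hypothetical LED helper
 *		return 0;			// we waited 0 ms ourselves
 *	}
 *
 *	panic_blink = my_panic_blink;
 */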
/*
 * Stop ourselves in panic -- architecture code may override this.
 */
void __weak __noreturn panic_smp_self_stop(void)
{
	while (1)
cpu_relax();
}
/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
panic_smp_self_stop();
}
/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with a more suitable version. For example, if the architecture supports
 * crash dump, it should save the registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;
	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
smp_send_stop();
cpus_stopped = 1;
}
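
/*
 * Arch override sketch (an assumption, not any real port's code): a
 * crash-dump capable architecture would typically send a dedicated stop
 * IPI/NMI and have every other CPU record its registers before parking:
 *
 *	void crash_smp_send_stop(void)
 *	{
 *		send_crash_stop_ipi();			// hypothetical helper
 *	}
 *
 *	// in the IPI/NMI handler on each stopped CPU:
 *	crash_save_cpu(regs, smp_processor_id());	// save regs for vmcore
 *	halt_this_cpu();				// hypothetical: park it
 */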
/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, this_cpu;

	old_cpu = PANIC_CPU_INVALID;
	this_cpu = raw_smp_processor_id();

	/* atomic_try_cmpxchg updates old_cpu on failure */
	if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu))
		panic("%s", msg);
	else if (old_cpu != this_cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);

void check_panic_on_warn(const char *origin)
{
	unsigned int limit;

	if (panic_on_warn)
		panic("%s: panic_on_warn set ...\n", origin);

	limit = READ_ONCE(warn_limit);
	if (atomic_inc_return(&warn_count) >= limit && limit)
		panic("%s: system warned too often (kernel.warn_limit is %d)",
		      origin, limit);
}
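
/*
 * Usage sketch (illustrative): the hard-lockup watchdog is the classic
 * nmi_panic() caller. From its NMI handler, roughly:
 *
 *	if (hardlockup_panic)
 *		nmi_panic(regs, "Hard LOCKUP");
 *
 * This returns immediately if the current CPU has already panicked, and
 * parks in nmi_panic_self_stop() if a different CPU panicked first.
 */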
/*
 * Helper that triggers the NMI backtrace (if set in panic_print)
 * and then performs the secondary CPUs shutdown - we cannot have
 * the NMI backtrace after the CPUs are off!
 */
static void panic_other_cpus_shutdown(bool crash_kexec)
{
	if (panic_print & SYS_INFO_ALL_CPU_BT) {
		/* Temporarily allow non-panic CPUs to write their backtraces. */
panic_triggering_all_cpu_backtrace = true;
trigger_all_cpu_backtrace();
panic_triggering_all_cpu_backtrace = false;
}
	/*
	 * Note that smp_send_stop() is the usual SMP shutdown function,
	 * which unfortunately may not be hardened to work in a panic
	 * situation. If we want to do crash dump after notifier calls
	 * and kmsg_dump, we will need architecture dependent extra
	 * bits in addition to stopping other CPUs, hence we rely on
	 * crash_smp_send_stop() for that.
	 */
	if (!crash_kexec)
		smp_send_stop();
	else
		crash_smp_send_stop();
}
/**
 * vpanic - halt the system
 * @fmt: The text string to print
 * @args: Arguments for the format string
 *
 * Display a message, then perform cleanups. This function never returns.
 */
void vpanic(const char *fmt, va_list args)
{
	static char buf[1024];
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_cpu check in panic().
		 */
panic_on_warn = 0;
}
	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
local_irq_disable();
preempt_disable_notrace();
	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * cmpxchg success means this is the 1st CPU which comes here,
	 * so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
old_cpu = PANIC_CPU_INVALID;
this_cpu = raw_smp_processor_id();
	/* atomic_try_cmpxchg updates old_cpu on failure */
	if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
		/* go ahead */
	} else if (old_cpu != this_cpu)
		panic_smp_self_stop();
console_verbose();
bust_spinlocks(1);
len = vscnprintf(buf, sizeof(buf), fmt, args);
pr_emerg("Kernel panic - not syncing: %s\n", buf); #ifdef CONFIG_DEBUG_BUGVERBOSE /* * Avoid nested stack-dumping if a panic occurs during oops processing
*/ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
dump_stack(); #endif
	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
kgdb_panic(buf);
	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers)
__crash_kexec(NULL);
	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
sys_info(panic_print);
kmsg_dump_desc(KMSG_DUMP_PANIC, buf);
	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make the crashed kernel
	 * more unstable, it can increase the risk of kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
__crash_kexec(NULL);
console_unblank();
	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
debug_locks_off();
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
if ((panic_print & SYS_INFO_PANIC_CONSOLE_REPLAY) ||
panic_console_replay)
console_flush_on_panic(CONSOLE_REPLAY_ALL);
if (!panic_blink)
panic_blink = no_blink;
	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
i += panic_blink(state ^= 1);
i_next = i + 3600 / PANIC_BLINK_SPD;
}
mdelay(PANIC_TIMER_STEP);
}
	}

	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
reboot_mode = panic_reboot_mode;
emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
/* Do not scroll important messages printed above */
suppress_printk = 1;
	/*
	 * The final messages may not have been printed if in a context that
	 * defers printing (such as NMI) and irq_work is not available.
	 * Explicitly flush the kernel log buffer one last time.
	 */
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
nbcon_atomic_flush_unsafe();
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
i += panic_blink(state ^= 1);
i_next = i + 3600 / PANIC_BLINK_SPD;
}
mdelay(PANIC_TIMER_STEP);
}
}
EXPORT_SYMBOL(vpanic);
/* Identical to vpanic(), except it takes variadic arguments instead of a va_list */
void panic(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vpanic(fmt, args);
}
EXPORT_SYMBOL(panic);
/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	return _print_tainted(false);
}
/**
 * print_tainted_verbose - A more verbose version of print_tainted()
 */
const char *print_tainted_verbose(void)
{
	return _print_tainted(true);
}
int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);
/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
pr_warn("Disabling lock debugging due to kernel taint\n");
set_bit(flag, &tainted_mask);
if (tainted_mask & panic_on_taint) {
panic_on_taint = 0;
panic("panic_on_taint set ...");
}
}
EXPORT_SYMBOL(add_taint);
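
/*
 * Usage sketch (illustrative): a driver that applies a firmware workaround
 * can record the fact while leaving lock debugging enabled:
 *
 *	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 *
 * Code that has detected real state corruption should pass
 * LOCKDEP_NOW_UNRELIABLE instead, so lock debugging is switched off too.
 */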
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
touch_nmi_watchdog();
mdelay(1);
}
}
/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
spin_unlock(&pause_on_oops_lock);
spin_msec(MSEC_PER_SEC);
spin_lock(&pause_on_oops_lock);
} while (--spin_counter);
pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
spin_unlock(&pause_on_oops_lock);
spin_msec(1);
spin_lock(&pause_on_oops_lock);
}
}
}
spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}
/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}
/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
nbcon_cpu_emergency_enter();
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
do_oops_enter_exit();
if (sysctl_oops_all_cpu_backtrace)
trigger_all_cpu_backtrace();
}
static void print_oops_end_marker(void)
{
pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}
/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
nbcon_cpu_emergency_exit();
kmsg_dump(KMSG_DUMP_OOPS);
}
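
/*
 * Call-sequence sketch (illustrative): an architecture die() handler is
 * expected to bracket its output with these helpers, roughly:
 *
 *	oops_enter();
 *	if (oops_may_print())
 *		show_regs(regs);	// arch-specific register dump
 *	oops_exit();
 *
 * so that pause_on_oops throttling and the KMSG_DUMP_OOPS dump both work.
 */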
static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}
device_initcall(register_warn_debugfs);
#endif
#ifdef CONFIG_STACKPROTECTOR
/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	unsigned long flags;

	instrumentation_begin();
	flags = user_access_save();

	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));

	user_access_restore(flags);
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);
#endif /* CONFIG_STACKPROTECTOR */