/*
 * Power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
/*
 * This is set if we need to go through the 'emergency' path.
 * When machine_emergency_restart() is called, we may be on
 * an inconsistent state and won't be able to do a clean cleanup.
 */
static int reboot_emergency;

/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
bool port_cf9_safe = false;
/*
 * Reboot options and system auto-detection code provided by
 * Dell Inc. so their systems "just work". :-)
 */
/*
 * Some machines require the "reboot=a" commandline option.
 * This quirk selects the ACPI reboot method for boards matched
 * in reboot_dmi_table.
 */
static int __init set_acpi_reboot(const struct dmi_system_id *d)
{
	if (reboot_type != BOOT_ACPI) {
		reboot_type = BOOT_ACPI;
		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
			d->ident, "ACPI");
	}
	return 0;
}
/* * Some machines require the "reboot=b" or "reboot=k" commandline options, * this quirk makes that automatic.
*/ staticint __init set_bios_reboot(conststruct dmi_system_id *d)
{ if (reboot_type != BOOT_BIOS) {
reboot_type = BOOT_BIOS;
pr_info("%s series board detected. Selecting %s-method for reboots.\n",
d->ident, "BIOS");
} return 0;
}
/*
 * Some machines don't handle the default ACPI reboot method and
 * require the EFI reboot method:
 */
static int __init set_efi_reboot(const struct dmi_system_id *d)
{
	/* Only honored when EFI runtime services are available. */
	if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
		reboot_type = BOOT_EFI;
		pr_info("%s series board detected. Selecting EFI-method for reboot.\n",
			d->ident);
	}
	return 0;
}
/* * Write zero to CMOS register number 0x0f, which the BIOS POST * routine will recognize as telling it to do a proper reboot. (Well * that's what this book in front of me says -- it may only apply to * the Phoenix BIOS though, it's not clear). At the same time, * disable NMIs by setting the top bit in the CMOS address register, * as we're about to do peculiar things to the CPU. I'm not sure if * `outb_p' is needed instead of just `outb'. Use it to be on the * safe side. (Yes, CMOS_WRITE does outb_p's. - Paul G.)
*/
spin_lock(&rtc_lock);
CMOS_WRITE(0x00, 0x8f);
spin_unlock(&rtc_lock);
/* * Switch to the trampoline page table.
*/
load_trampoline_pgtable();
/*
 * Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
 */
static int __init set_pci_reboot(const struct dmi_system_id *d)
{
	if (reboot_type != BOOT_CF9_FORCE) {
		reboot_type = BOOT_CF9_FORCE;
		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
			d->ident, "PCI");
	}
	return 0;
}
/* Force the keyboard-controller reboot method for the matched board. */
static int __init set_kbd_reboot(const struct dmi_system_id *d)
{
	if (reboot_type != BOOT_KBD) {
		reboot_type = BOOT_KBD;
		pr_info("%s series board detected. Selecting %s-method for reboot.\n",
			d->ident, "KBD");
	}
	return 0;
}
/* * This is a single dmi_table handling all reboot quirks.
*/ staticconststruct dmi_system_id reboot_dmi_table[] __initconst = {
/* Sony */
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
.ident = "Sony VGN-Z540N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ }
};
/*
 * Apply DMI reboot quirks, unless the reboot method was already chosen
 * on the command line.
 */
static int __init reboot_init(void)
{
	int rv;

	/*
	 * Only do the DMI check if reboot_type hasn't been overridden
	 * on the command line
	 */
	if (!reboot_default)
		return 0;

	/*
	 * The DMI quirks table takes precedence. If no quirks entry
	 * matches and the ACPI Hardware Reduced bit is set and EFI
	 * runtime services are enabled, force EFI reboot.
	 */
	rv = dmi_check_system(reboot_dmi_table);

	if (!rv && efi_reboot_required() && !efi_runtime_disabled())
		reboot_type = BOOT_EFI;

	return 0;
}
core_initcall(reboot_init);
/*
 * Spin (bounded) until the keyboard controller's input buffer is empty,
 * i.e. bit 1 of the status port (0x64) clears.
 */
static inline void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++) {
		if ((inb(0x64) & 0x02) == 0)
			break;
		udelay(2);
	}
}
/*
 * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
 * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
 * GIF=0, i.e. if the crash occurred between CLGI and STGI.
 */
void cpu_emergency_disable_virtualization(void)
{
	cpu_emergency_virt_cb *callback;

	/*
	 * IRQs must be disabled as KVM enables virtualization in hardware via
	 * function call IPIs, i.e. IRQs need to be disabled to guarantee
	 * virtualization stays disabled.
	 */
	lockdep_assert_irqs_disabled();

	rcu_read_lock();
	callback = rcu_dereference(cpu_emergency_virt_callback);
	if (callback)
		callback();
	rcu_read_unlock();
}
/* * Disable virtualization on all CPUs before rebooting to avoid hanging * the system, as VMX and SVM block INIT when running in the host. * * We can't take any locks and we may be on an inconsistent state, so * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt. * * Do the NMI shootdown even if virtualization is off on _this_ CPU, as * other CPUs may have virtualization enabled.
*/ if (rcu_access_pointer(cpu_emergency_virt_callback)) { /* Safely force _this_ CPU out of VMX/SVM operation. */
cpu_emergency_disable_virtualization();
/* Disable VMX/SVM and halt on other CPUs. */
nmi_shootdown_cpus_on_restart();
}
} #else staticvoid emergency_reboot_disable_virtualization(void) { } #endif/* CONFIG_KVM_X86 */
/*
 * To the best of our knowledge Windows compatible x86 hardware expects
 * the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
 * 6) If no EFI runtime service, call the BIOS to do a reboot
 *
 * We default to following the same pattern. We also have
 * two other reboot methods: 'triple fault' and 'PCI', which
 * can be triggered via the reboot= kernel boot option or
 * via quirks.
 *
 * This means that this function can never return, it can misbehave
 * by not rebooting properly and hanging.
 */
static void native_machine_emergency_restart(void)
{
	int i;
	int attempt = 0;
	int orig_reboot_type = reboot_type;
	unsigned short mode;

	if (reboot_emergency)
		emergency_reboot_disable_virtualization();

	tboot_shutdown(TB_SHUTDOWN_REBOOT);

	/* Tell the BIOS if we want cold or warm reboot */
	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
	*((unsigned short *)__va(0x472)) = mode;

	/*
	 * If an EFI capsule has been registered with the firmware then
	 * override the reboot= parameter.
	 */
	if (efi_capsule_pending(NULL)) {
		pr_info("EFI capsule is pending, forcing EFI reboot.\n");
		reboot_type = BOOT_EFI;
	}

	for (;;) {
		/* Could also try the reset bit in the Hammer NB */
		switch (reboot_type) {
		case BOOT_ACPI:
			acpi_reboot();
			reboot_type = BOOT_KBD;
			break;

		case BOOT_KBD:
			mach_reboot_fixups(); /* For board specific fixups */

			/*
			 * Pulse the keyboard controller's reset line. This
			 * loop was missing here, leaving i/attempt/
			 * orig_reboot_type unused and BOOT_KBD silently
			 * falling through to BOOT_EFI.
			 */
			for (i = 0; i < 10; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe, 0x64); /* Pulse reset low */
				udelay(50);
			}
			if (attempt == 0 && orig_reboot_type == BOOT_ACPI) {
				/* Give ACPI one retry before moving on. */
				attempt = 1;
				reboot_type = BOOT_ACPI;
			} else {
				reboot_type = BOOT_EFI;
			}
			break;

		case BOOT_EFI:
			efi_reboot(reboot_mode, NULL);
			reboot_type = BOOT_BIOS;
			break;

		case BOOT_BIOS:
			machine_real_restart(MRR_BIOS);

			/* We're probably dead after this, but... */
			reboot_type = BOOT_CF9_SAFE;
			break;

		case BOOT_CF9_FORCE:
			port_cf9_safe = true;
			fallthrough;

		case BOOT_CF9_SAFE:
			if (port_cf9_safe) {
				u8 reboot_code = reboot_mode == REBOOT_WARM ?  0x06 : 0x0E;
				u8 cf9 = inb(0xcf9) & ~reboot_code;
				outb(cf9|2, 0xcf9); /* Request hard reset */
				udelay(50);
				/* Actually do the reset */
				outb(cf9|reboot_code, 0xcf9);
				udelay(50);
			}
			reboot_type = BOOT_TRIPLE;
			break;

		case BOOT_TRIPLE:
			idt_invalidate();
			__asm__ __volatile__("int3");

			/* We're probably dead after this, but... */
			reboot_type = BOOT_KBD;
			break;
		}
	}
}
void native_machine_shutdown(void)
{ /* * Call enc_kexec_begin() while all CPUs are still active and * interrupts are enabled. This will allow all in-flight memory * conversions to finish cleanly.
*/ if (kexec_in_progress)
x86_platform.guest.enc_kexec_begin();
/* Stop the cpus and apics */ #ifdef CONFIG_X86_IO_APIC /* * Disabling IO APIC before local APIC is a workaround for * erratum AVR31 in "Intel Atom Processor C2000 Product Family * Specification Update". In this situation, interrupts that target * a Logical Processor whose Local APIC is either in the process of * being hardware disabled or software disabled are neither delivered * nor discarded. When this erratum occurs, the processor may hang. * * Even without the erratum, it still makes sense to quiet IO APIC * before disabling Local APIC.
*/
clear_IO_APIC(); #endif
#ifdef CONFIG_SMP /* * Stop all of the others. Also disable the local irq to * not receive the per-cpu timer interrupt which may trigger * scheduler's load balance.
*/
local_irq_disable();
stop_other_cpus(); #endif
if (!reboot_force)
machine_shutdown();
__machine_emergency_restart(0);
}
static void native_machine_halt(void)
{
	/* Stop other cpus and apics */
	machine_shutdown();

	tboot_shutdown(TB_SHUTDOWN_HALT);

	stop_this_cpu(NULL);
}
static void native_machine_power_off(void)
{
	if (kernel_can_power_off()) {
		if (!reboot_force)
			machine_shutdown();
		do_kernel_power_off();
	}

	/* A fallback in case there is no PM info available */
	tboot_shutdown(TB_SHUTDOWN_HALT);
}
staticint crash_nmi_callback(unsignedint val, struct pt_regs *regs)
{ int cpu;
cpu = raw_smp_processor_id();
/* * Don't do anything if this handler is invoked on crashing cpu. * Otherwise, system will completely hang. Crashing cpu can get * an NMI if system was initially booted with nmi_watchdog parameter.
*/ if (cpu == crashing_cpu) return NMI_HANDLED;
local_irq_disable();
if (shootdown_callback)
shootdown_callback(cpu, regs);
/* * Prepare the CPU for reboot _after_ invoking the callback so that the * callback can safely use virtualization instructions, e.g. VMCLEAR.
*/
cpu_emergency_disable_virtualization();
atomic_dec(&waiting_for_crash_ipi);
if (smp_ops.stop_this_cpu) {
smp_ops.stop_this_cpu();
BUG();
}
/* Assume hlt works */
halt(); for (;;)
cpu_relax();
return NMI_HANDLED;
}
/**
 * nmi_shootdown_cpus - Stop other CPUs via NMI
 * @callback:	Optional callback to be invoked from the NMI handler
 *
 * The NMI handler on the remote CPUs invokes @callback, if not
 * NULL, first and then disables virtualization to ensure that
 * INIT is recognized during reboot.
 *
 * nmi_shootdown_cpus() can only be invoked once. After the first
 * invocation all other CPUs are stuck in crash_nmi_callback() and
 * cannot respond to a second NMI.
 */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;

	local_irq_disable();

	/*
	 * Avoid certain doom if a shootdown already occurred; re-registering
	 * the NMI handler will cause list corruption, modifying the callback
	 * will do who knows what, etc...
	 */
	if (WARN_ON_ONCE(crash_ipi_issued))
		return;

	/* Make a note of crashing cpu. Will be used in NMI callback. */
	crashing_cpu = smp_processor_id();

	/*
	 * Set emergency handler to preempt other handlers.
	 */
	set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);

	apic_send_IPI_allbutself(NMI_VECTOR);

	/* Kick CPUs looping in NMI context. */
	WRITE_ONCE(crash_ipi_issued, 1);

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/*
	 * Leave the nmi callback set, shootdown is a one-time thing. Clearing
	 * the callback could result in a NULL pointer dereference if a CPU
	 * (finally) responds after the timeout expires.
	 */
}
/* Shoot down other CPUs for restart, but only if no shootdown happened yet. */
static inline void nmi_shootdown_cpus_on_restart(void)
{
	if (!crash_ipi_issued)
		nmi_shootdown_cpus(NULL);
}
/*
 * Check if the crash dumping IPI got issued and if so, call its callback
 * directly. This function is used when we have already been in NMI handler.
 * It doesn't return.
 */
void run_crash_ipi_callback(struct pt_regs *regs)
{
	if (crash_ipi_issued)
		crash_nmi_callback(0, regs);
}
/* Override the weak function in kernel/panic.c */
void __noreturn nmi_panic_self_stop(struct pt_regs *regs)
{
	while (1) {
		/* If no CPU is preparing crash dump, we simply loop here. */
		run_crash_ipi_callback(regs);
		cpu_relax();
	}
}
#else/* !CONFIG_SMP */ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{ /* No other CPUs to shoot down */
}
/*
 * NOTE(review): extraction residue from a web page, not kernel code;
 * preserved (translated) as a comment so the file remains compilable:
 * "The information on this website was carefully compiled to the best
 * of our knowledge. However, neither completeness, correctness, nor
 * quality of the information provided is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */