// SPDX-License-Identifier: GPL-2.0-only /* * Suspend support specific for i386/x86-64. * * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl> * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz> * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
*/
while (msr < end) { if (msr->valid)
wrmsrq(msr->info.msr_no, msr->info.reg.q);
msr++;
}
}
/** * __save_processor_state() - Save CPU registers before creating a * hibernation image and before restoring * the memory state from it * @ctxt: Structure to store the registers contents in. * * NOTE: If there is a CPU register the modification of which by the * boot kernel (ie. the kernel used for loading the hibernation image) * might affect the operations of the restored target kernel (ie. the one * saved in the hibernation image), then its contents must be saved by this * function. In other words, if kernel A is hibernated and different * kernel B is used for loading the hibernation image into memory, the * kernel A's __save_processor_state() function must save all registers * needed by kernel A, so that it can operate correctly after the resume * regardless of what kernel B does in the meantime.
*/ staticvoid __save_processor_state(struct saved_context *ctxt)
{ #ifdef CONFIG_X86_32
mtrr_save_fixed_ranges(NULL); #endif
kernel_fpu_begin();
/* * descriptor tables
*/
store_idt(&ctxt->idt);
/* * We save it here, but restore it only in the hibernate case. * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit * mode in "secondary_startup_64". In 32-bit mode it is done via * 'pmode_gdt' in wakeup_start.
*/
ctxt->gdt_desc.size = GDT_SIZE - 1;
ctxt->gdt_desc.address = (unsignedlong)get_cpu_gdt_rw(smp_processor_id());
store_tr(ctxt->tr);
/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ /* * segment registers
*/
savesegment(gs, ctxt->gs); #ifdef CONFIG_X86_64
savesegment(fs, ctxt->fs);
savesegment(ds, ctxt->ds);
savesegment(es, ctxt->es);
staticvoid fix_processor_context(void)
{ int cpu = smp_processor_id(); #ifdef CONFIG_X86_64 struct desc_struct *desc = get_cpu_gdt_rw(cpu);
tss_desc tss; #endif
/* * We need to reload TR, which requires that we change the * GDT entry to indicate "available" first. * * XXX: This could probably all be replaced by a call to * force_reload_TR().
*/
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
#ifdef CONFIG_X86_64
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
syscall_init(); /* This sets MSR_*STAR and related */ #else if (boot_cpu_has(X86_FEATURE_SEP))
enable_sep_cpu(); #endif
load_TR_desc(); /* This does ltr */
load_mm_ldt(current->active_mm); /* This does lldt */
initialize_tlbstate_and_flush();
fpu__resume_cpu();
/* The processor is back on the direct GDT, load back the fixmap */
load_fixmap_gdt(cpu);
}
/**
 * __restore_processor_state() - Restore the contents of CPU registers saved
 *                               by __save_processor_state()
 * @ctxt: Structure to load the registers contents from.
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrq(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);

	/*
	 * Reinitialize FRED to ensure the FRED MSRs contain the same values
	 * as before hibernation.
	 *
	 * Note, the setup of FRED RSPs requires access to percpu data
	 * structures.  Therefore, FRED reinitialization can only occur after
	 * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
	 */
	if (ctxt->cr4 & X86_CR4_FRED) {
		cpu_init_fred_exceptions();
		cpu_init_fred_rsps();
	}
#else
	loadsegment(fs, __KERNEL_PERCPU);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	/* NOTE(review): ds is reloaded from the saved 'es' value here; this
	 * matches the original code — confirm it is intentional. */
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrq(MSR_FS_BASE, ctxt->fs_base);
	wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);

	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon resume
	 * because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}
int hibernate_resume_nonboot_cpu_disable(void)
{ void (*play_dead)(void) = smp_ops.play_dead; int ret;
/* * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop * during hibernate image restoration, because it is likely that the * monitored address will be actually written to at that time and then * the "dead" CPU will attempt to execute instructions again, but the * address in its instruction pointer may not be possible to resolve * any more at that point (the page tables used by it previously may * have been overwritten by hibernate image data). * * First, make sure that we wake up all the potentially disabled SMT * threads which have been initially brought up and then put into * mwait/cpuidle sleep. * Those will be put to proper (not interfering with hibernation * resume) sleep afterwards, and the resumed kernel will decide itself * what to do with them.
*/
ret = cpuhp_smt_enable(); if (ret) return ret;
smp_ops.play_dead = resume_play_dead;
ret = freeze_secondary_cpus(0);
smp_ops.play_dead = play_dead; return ret;
} #endif
/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already. So it's unnecessary to handle race condition between
 * cpumask query and cpu hotplug.
 *
 * Returns 0 if the boot CPU (CPU0) is online, -ENODEV otherwise.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}
staticint bsp_pm_callback(struct notifier_block *nb, unsignedlong action, void *ptr)
{ int ret = 0;
switch (action) { case PM_SUSPEND_PREPARE: case PM_HIBERNATION_PREPARE:
ret = bsp_check(); break; default: break;
} return notifier_from_errno(ret);
}
/* Register bsp_pm_callback as a PM notifier at boot. */
static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback. So cpu_hotplug_pm_callback will be called
	 * earlier to disable cpu hotplug before bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);
staticint msr_build_context(const u32 *msr_id, constint num)
{ struct saved_msrs *saved_msrs = &saved_context.saved_msrs; struct saved_msr *msr_array; int total_num; int i, j;
total_num = saved_msrs->num + num;
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); if (!msr_array) {
pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n"); return -ENOMEM;
}
if (saved_msrs->array) { /* * Multiple callbacks can invoke this function, so copy any * MSR save requests from previous invocations.
*/
memcpy(msr_array, saved_msrs->array, sizeof(struct saved_msr) * saved_msrs->num);
kfree(saved_msrs->array);
}
for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
u64 dummy;
/* * The following sections are a quirk framework for problematic BIOSen: * Sometimes MSRs are modified by the BIOSen after suspended to * RAM, this might cause unexpected behavior after wakeup. * Thus we save/restore these specified MSRs across suspend/resume * in order to work around it. * * For any further problematic BIOSen/platforms, * please add your own function similar to msr_initialize_bdw.
*/
/*
 * msr_initialize_bdw - DMI callback registering MSRs to save across suspend.
 * @d: the matched DMI table entry (used only for its ident string).
 *
 * Returns the result of msr_build_context() (0 on success, -ENOMEM on
 * allocation failure).
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.