/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;
/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
unsigned long temp_pgt __visible;
unsigned long relocated_restore_code __visible;
/*
 * NOTE(review): garbled extraction — "unsignedlong" should read
 * "unsigned long", and the function body is truncated below: only the
 * two local declarations survived; the computation of the nosave PFN
 * range, the comparison against @pfn, the return statement, and the
 * closing brace are missing. Recover the full body from the original
 * source file before building.
 */
/** * pfn_is_nosave - check if given pfn is in the 'nosave' section * @pfn: the page frame number to check.
*/ int pfn_is_nosave(unsignedlong pfn)
{ unsignedlong nosave_begin_pfn; unsignedlong nosave_end_pfn;
/*
 * NOTE(review): garbled extraction — "unsignedint" should read
 * "unsigned int", and the body is truncated: per the kernel-doc below
 * it must validate @max_size (returning -EOVERFLOW when too small) and
 * return 0 on success, but only the CR3 assignment survived the
 * extraction; the other header-field assignments, the return
 * statement, and the closing brace are missing. Recover the full body
 * from the original source file before building.
 */
/** * arch_hibernation_header_save - populate the architecture specific part * of a hibernation image header * @addr: address where architecture specific header data will be saved. * @max_size: maximum size of architecture specific data in hibernation header. * * Return: 0 on success, -EOVERFLOW if max_size is insufficient.
*/ int arch_hibernation_header_save(void *addr, unsignedint max_size)
{ struct restore_data_record *rdr = addr;
/* * The restore code fixes up CR3 and CR4 in the following sequence: * * [in hibernation asm] * 1. CR3 <= temporary page tables * 2. CR4 <= mmu_cr4_features (from the kernel that restores us) * 3. CR3 <= rdr->cr3 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel) * [in restore_processor_state()] * 5. CR4 <= saved CR4 * 6. CR3 <= saved CR3 * * Our mmu_cr4_features has CR4.PCIDE=0, and toggling * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so * rdr->cr3 needs to point to valid page tables but must not * have any of the PCID bits set.
*/
/* Mask out the PCID bits so the saved CR3 is legal to load with PCIDE=0. */
rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
/*
 * NOTE(review): garbled extraction — the body is truncated after the
 * first local declaration: the reads of the header fields out of *rdr,
 * any validation, the return statement, and the closing brace are
 * missing. Recover the full body from the original source file before
 * building.
 */
/** * arch_hibernation_header_restore - read the architecture specific data * from the hibernation image header * @addr: address to read the data from
*/ int arch_hibernation_header_restore(void *addr)
{ struct restore_data_record *rdr = addr;
/*
 * NOTE(review): garbled extraction — this is the tail of a function
 * whose opening (signature and the declaration of 'ret') was lost in
 * extraction; only the explanatory comment and the
 * enable/rescan/disable sequence plus the closing brace remain.
 * Recover the enclosing function definition from the original source
 * file before building.
 */
/* * We reached this while coming out of hibernation. This means * that SMT siblings are sleeping in hlt, as mwait is not safe * against control transition during resume (see comment in * hibernate_resume_nonboot_cpu_disable()). * * If the resumed kernel has SMT disabled, we have to take all the * SMT siblings out of hlt, and offline them again so that they * end up in mwait proper. * * Called with hotplug disabled.
*/
/* Temporarily re-enable hotplug so the rescan below may online/offline CPUs. */
cpu_hotplug_enable();
ret = arch_cpu_rescan_dead_smt_siblings();
cpu_hotplug_disable();
return ret;
}
Messung V0.5
• Dauer der Verarbeitung: 0,11 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.