/**
 * struct arch_hibernate_hdr_invariants - kernel build identification.
 * @uts_version: build number and date, recorded so that we refuse to resume
 *               an image produced by a different kernel.
 */
struct arch_hibernate_hdr_invariants {
	char uts_version[__NEW_UTS_LEN + 1];
};
/** * struct arch_hibernate_hdr - helper parameters that help us to restore the image. * @invariants: container to store kernel build version. * @hartid: to make sure same boot_cpu executes the hibernate/restore code. * @saved_satp: original page table used by the hibernated image. * @restore_cpu_addr: the kernel's image address to restore the CPU context.
*/ staticstruct arch_hibernate_hdr { struct arch_hibernate_hdr_invariants invariants; unsignedlong hartid; unsignedlong saved_satp; unsignedlong restore_cpu_addr;
} resume_hdr;
/* * Check if the given pfn is in the 'nosave' section.
*/ int pfn_is_nosave(unsignedlong pfn)
{ unsignedlong nosave_begin_pfn = sym_to_pfn(&__nosave_begin); unsignedlong nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
/* * Helper parameters need to be saved to the hibernation image header.
*/ int arch_hibernation_header_save(void *addr, unsignedint max_size)
{ struct arch_hibernate_hdr *hdr = addr;
/* * Retrieve the helper parameters from the hibernation image header.
*/ int arch_hibernation_header_restore(void *addr)
{ struct arch_hibernate_hdr_invariants invariants; struct arch_hibernate_hdr *hdr = addr; int ret = 0;
arch_hdr_invariants(&invariants);
if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
pr_crit("Hibernate image not generated by this kernel!\n"); return -EINVAL;
}
sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid); if (sleep_cpu < 0) {
pr_crit("Hibernated on a CPU not known to this kernel!\n");
sleep_cpu = -EINVAL; return -EINVAL;
}
#ifdef CONFIG_SMP
ret = bringup_hibernate_cpu(sleep_cpu); if (ret) {
sleep_cpu = -EINVAL; return ret;
} #endif
resume_hdr = *hdr;
/*
 * NOTE(review): fragment — the beginning of the enclosing function (the
 * relocate_restore_code() called from swsusp_arch_resume()) is missing from
 * this chunk; only its tail is visible. 'page' presumably holds a freshly
 * allocated page into which the restore code was copied — confirm against
 * the original file.
 */
	/* Make the page containing the relocated code executable. */
	set_memory_x((unsigned long)page, 1);

	return (unsigned long)page;
}
int swsusp_arch_resume(void)
{ unsignedlong end = (unsignedlong)pfn_to_virt(max_low_pfn); unsignedlong start = PAGE_OFFSET; int ret;
/* * Memory allocated by get_safe_page() will be dealt with by the hibernation core, * we don't need to free it here.
*/
resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!resume_pg_dir) return -ENOMEM;
/* * Create a temporary page table and map the whole linear region as executable and * writable.
*/
ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC)); if (ret) return ret;
/* Move the restore code to a new page so that it doesn't get overwritten by itself. */
relocated_restore_code = relocate_restore_code(); if (relocated_restore_code == -ENOMEM) return -ENOMEM;
/* * Map the __hibernate_cpu_resume() address to the temporary page table so that the * restore code can jumps to it after finished restore the image. The next execution * code doesn't find itself in a different address space after switching over to the * original page table used by the hibernated image. * The __hibernate_cpu_resume() mapping is unnecessary for RV32 since the kernel and * linear addresses are identical, but different for RV64. To ensure consistency, we * map it for both RV32 and RV64 kernels. * Additionally, we should ensure that the page is writable before restoring the image.
*/
start = (unsignedlong)resume_hdr.restore_cpu_addr;
end = start + PAGE_SIZE;
ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE)); if (ret) return ret;
#ifdef CONFIG_PM_SLEEP_SMP
/*
 * Refuse to resume when the CPU the image hibernated on is unknown to this
 * kernel; otherwise the non-boot CPUs must be taken offline before restore.
 *
 * NOTE(review): truncated — the success path (offlining the secondary CPUs
 * while keeping sleep_cpu online) and the matching #endif are missing from
 * this chunk and must be recovered from the original file.
 */
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU\n");
		return -ENODEV;
	}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.