/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	/* A crash kernel must be loadable even with CPUs stuck. */
	if (kimage->type == KEXEC_TYPE_CRASH)
		return 0;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}
/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 *
 * NOTE(review): the tokens below are whitespace-mangled ("staticvoid",
 * "conststruct", "unsignedlong") and will not compile as-is. The loop
 * body and the function itself are also never closed before the next
 * definition begins — this text appears truncated. Compare against a
 * known-good tree before relying on it.
 */ staticvoid kexec_segment_flush(conststruct kimage *kimage)
{ unsignedlong i;
pr_debug("%s:\n", __func__);
/* Log each segment's physical range, byte size, and page count. */
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz,
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
int machine_kexec_post_load(struct kimage *kimage)
{ int rc;
pgd_t *trans_pgd; void *reloc_code = page_to_virt(kimage->control_code_page); long reloc_size; struct trans_pgd_info info = {
.trans_alloc_page = kexec_page_alloc,
.trans_alloc_arg = kimage,
};
/* If in place, relocation is not used, only flush next kernel */ if (kimage->head & IND_DONE) {
kexec_segment_flush(kimage);
kexec_image_info(kimage); return 0;
}
kimage->arch.el2_vectors = 0; if (is_hyp_nvhe()) {
rc = trans_pgd_copy_el2_vectors(&info,
&kimage->arch.el2_vectors); if (rc) return rc;
}
/* Create a copy of the linear map */
trans_pgd = kexec_page_alloc(kimage); if (!trans_pgd) return -ENOMEM;
rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END); if (rc) return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
kimage->arch.zero_page = __pa_symbol(empty_zero_page);
/* Flush the reloc_code in preparation for its execution. */
dcache_clean_inval_poc((unsignedlong)reloc_code,
(unsignedlong)reloc_code + reloc_size);
icache_inval_pou((uintptr_t)reloc_code,
(uintptr_t)reloc_code + reloc_size);
kexec_image_info(kimage);
return 0;
}
/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 *
 * NOTE(review): this body appears garbled and truncated. 'restart' is
 * declared but never used, 'regs' is not declared anywhere in this
 * function (the crash_save_cpu() lines look like they belong to a
 * separate crash-shutdown path that takes a struct pt_regs *), and the
 * function's closing brace plus the code that actually jumps into the
 * new kernel are missing. Compare against a known-good tree before use.
 */ void machine_kexec(struct kimage *kimage)
{ bool in_kexec_crash = (kimage == kexec_crash_image); bool stuck_cpus = cpus_are_stuck_in_kernel();
/*
 * New cpus may have become stuck_in_kernel after we loaded the image.
 */
BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()), "Some CPUs may be stale, kdump will be unreliable.\n");
pr_info("Bye!\n");
/* Mask all DAIF exceptions: no interrupts past this point. */
local_daif_mask();
/* * Both restart and kernel_reloc will shutdown the MMU, disable data * caches. However, restart will start new kernel or purgatory directly, * kernel_reloc contains the body of arm64_relocate_new_kernel * In kexec case, kimage->start points to purgatory assuming that * kernel entry and dtb address are embedded in purgatory by * userspace (kexec-tools). * In kexec_file case, the kernel starts directly without purgatory.
 */ if (kimage->head & IND_DONE) {
typeof(cpu_soft_restart) *restart;
/* for crashing cpu */
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
pr_info("Starting crashdump kernel...\n");
}
#ifdefined(CONFIG_CRASH_DUMP) && defined(CONFIG_HIBERNATION) /* * To preserve the crash dump kernel image, the relevant memory segments * should be mapped again around the hibernation.
*/ void crash_prepare_suspend(void)
{ if (kexec_crash_image)
arch_kexec_unprotect_crashkres();
}
/*
 * crash_post_resume - Re-protect crashkernel memory after resume from
 * hibernation, restoring the protection dropped for the suspend path.
 */
void crash_post_resume(void)
{
	if (!kexec_crash_image)
		return;

	arch_kexec_protect_crashkres();
}
/* * crash_is_nosave * * Return true only if a page is part of reserved memory for crash dump kernel, * but does not hold any data of loaded kernel image. * * Note that all the pages in crash dump kernel memory have been initially * marked as Reserved as memory was allocated via memblock_reserve(). * * In hibernation, the pages which are Reserved and yet "nosave" are excluded * from the hibernation iamge. crash_is_nosave() does thich check for crash * dump kernel and will reduce the total size of hibernation image.
*/
bool crash_is_nosave(unsignedlong pfn)
{ int i;
phys_addr_t addr;
if (!crashk_res.end) returnfalse;
/* in reserved memory? */
addr = __pfn_to_phys(pfn); if ((addr < crashk_res.start) || (crashk_res.end < addr)) { if (!crashk_low_res.end) returnfalse;
if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr)) returnfalse;
}
if (!kexec_crash_image) returntrue;
/* not part of loaded kernel image? */ for (i = 0; i < kexec_crash_image->nr_segments; i++) if (addr >= kexec_crash_image->segment[i].mem &&
addr < (kexec_crash_image->segment[i].mem +
kexec_crash_image->segment[i].memsz)) returnfalse;
/*
 * NOTE(review): the following German disclaimer is extraneous scraped web
 * content, not part of this source file; it is wrapped in a comment so it
 * cannot break compilation, and translated: "The information on this
 * website was carefully compiled to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax highlighting and
 * the measurement are still experimental."
 */