// SPDX-License-Identifier: GPL-2.0-only /* * handle transition of Linux booting another kernel * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
*/
#ifdef CONFIG_ACPI
/*
 * Used while adding mapping for ACPI tables.
 * Can be reused when other iomem regions need be mapped
 */
struct init_pgtable_data {
	struct x86_mapping_info *info;	/* mapping flags + page-table allocator callbacks */
	pgd_t *level4p;			/* root of the identity page table being populated */
};
ret = walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1,
&data, mem_region_callback); if (ret && ret != -EINVAL) return ret;
/* ACPI tables could be located in ACPI Non-volatile Storage region */
ret = walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1,
&data, mem_region_callback); if (ret && ret != -EINVAL) return ret;
image->arch.pgd = alloc_pgt_page(image); if (!image->arch.pgd) return -ENOMEM;
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
info.page_flag |= _PAGE_ENC;
info.kernpg_flag |= _PAGE_ENC;
}
if (direct_gbpages)
info.direct_gbpages = true;
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
result = kernel_ident_mapping_init(&info, image->arch.pgd,
mstart, mend); if (result) return result;
}
/* * segments's mem ranges could be outside 0 ~ max_pfn, * for example when jump back to original kernel from kexeced kernel. * or first kernel is booted with user mem map, and second kernel * could be loaded out of that range.
*/ for (i = 0; i < image->nr_segments; i++) {
mstart = image->segment[i].mem;
mend = mstart + image->segment[i].memsz;
result = kernel_ident_mapping_init(&info, image->arch.pgd,
mstart, mend);
if (result) return result;
}
/* * Prepare EFI systab and ACPI tables for kexec kernel since they are * not covered by pfn_mapped.
*/
result = map_efi_systab(&info, image->arch.pgd); if (result) return result;
result = map_acpi_tables(&info, image->arch.pgd); if (result) return result;
result = map_mmio_serial(&info, image->arch.pgd); if (result) return result;
/* * This must be last because the intermediate page table pages it * allocates will not be control pages and may overlap the image.
*/ return init_transition_pgtable(image, image->arch.pgd, control_page);
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void __nocfi machine_kexec(struct kimage *image)
{
	unsigned long reloc_start = (unsigned long)__relocate_kernel_start;
	relocate_kernel_fn *relocate_kernel_ptr;
	unsigned int host_mem_enc_active;
	int save_ftrace_enabled;
	void *control_page;

	/*
	 * This must be done before load_segments() since if call depth tracking
	 * is used then GS must be valid to make any function calls.
	 */
	host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();
	cet_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in second kernel. kexec/kdump
		 * paths already have calls to restore_boot_irq_mode()
		 * in one form or other. kexec jump path also need one.
		 */
		clear_IO_APIC();
		restore_boot_irq_mode();
#endif
	}

	/*
	 * Allow for the possibility that relocate_kernel might not be at
	 * the very start of the page.
	 *
	 * NOTE(review): control_page is declared but never assigned anywhere
	 * in this view; upstream initializes it from the image's control code
	 * page before this point — confirm the assignment was not lost.
	 */
	relocate_kernel_ptr = control_page + (unsigned long)relocate_kernel -
			      reloc_start;

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * with from a table in memory. At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * Take advantage of this here by force loading the segments,
	 * before the GDT is zapped with an invalid value.
	 */
	load_segments();

	/* now call it */
	image->start = relocate_kernel_ptr((unsigned long)image->head,
					   virt_to_phys(control_page),
					   image->start,
					   image->preserve_context,
					   host_mem_enc_active);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}
/* arch-dependent functionality related to kexec file-based syscall */
#ifdef CONFIG_KEXEC_FILE /* * Apply purgatory relocations. * * @pi: Purgatory to be relocated. * @section: Section relocations applying to. * @relsec: Section containing RELAs. * @symtabsec: Corresponding symtab. * * TODO: Some of the code belongs to generic code. Move that in kexec.c.
*/ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section, const Elf_Shdr *relsec, const Elf_Shdr *symtabsec)
{ unsignedint i;
Elf64_Rela *rel;
Elf64_Sym *sym; void *location; unsignedlong address, sec_base, value; constchar *strtab, *name, *shstrtab; const Elf_Shdr *sechdrs;
pr_debug("Applying relocate section %s to %u\n",
shstrtab + relsec->sh_name, relsec->sh_info);
for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {
/* * rel[i].r_offset contains byte offset from beginning * of section to the storage unit affected. * * This is location to update. This is temporary buffer * where section is currently loaded. This will finally be * loaded to a different address later, pointed to by * ->sh_addr. kexec takes care of moving it * (kexec_load_segment()).
*/
location = pi->purgatory_buf;
location += section->sh_offset;
location += rel[i].r_offset;
/* Final address of the location */
address = section->sh_addr + rel[i].r_offset;
/* * rel[i].r_info contains information about symbol table index * w.r.t which relocation must be made and type of relocation * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get * these respectively.
*/
sym = (void *)pi->ehdr + symtabsec->sh_offset;
sym += ELF64_R_SYM(rel[i].r_info);
if (sym->st_name)
name = strtab + sym->st_name; else
name = shstrtab + sechdrs[sym->st_shndx].sh_name;
/* * For physical range: [start, end]. We must skip the unassigned * crashk resource with zero-valued "end" member.
*/ if (!end || start > end) return 0;
/* Don't touch the control code page used in crash_kexec().*/
control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
kexec_mark_range(crashk_res.start, control - 1, protect);
control += KEXEC_CONTROL_PAGE_SIZE;
kexec_mark_range(control, crashk_res.end, protect);
}
/* make the memory storing dm crypt keys in/accessible */ staticvoid kexec_mark_dm_crypt_keys(bool protect)
{ unsignedlong start_paddr, end_paddr; unsignedint nr_pages;
/*
 * During a traditional boot under SME, SME will encrypt the kernel,
 * so the SME kexec kernel also needs to be un-encrypted in order to
 * replicate a normal SME boot.
 *
 * During a traditional boot under SEV, the kernel has already been
 * loaded encrypted, so the SEV kexec kernel needs to be encrypted in
 * order to replicate a normal SEV boot.
 */
int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
	/* Nothing to do unless the host encrypts memory (SME active). */
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return 0;

	/*
	 * If host memory encryption is active we need to be sure that kexec
	 * pages are not encrypted because when we boot to the new kernel the
	 * pages won't be accessed encrypted (initially).
	 */
	return set_memory_decrypted((unsigned long)vaddr, pages);
}
void arch_kexec_pre_free_pages(void *vaddr, unsignedint pages)
{ if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) return;
/* * If host memory encryption is active we need to reset the pages back * to being an encrypted mapping before freeing them.
*/
set_memory_encrypted((unsignedlong)vaddr, pages);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.