// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert EFI to E820 map has been implemented in elilo bootloader
 * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
 * is setup appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 */
/*
 * We allocate runtime services regions top-down, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = EFI_VA_START;

/*
 * The mm that was active before the EFI mm was borrowed; stored by
 * efi_enter_mm() (presumably so a matching leave path can restore it —
 * the restore side is not visible in this chunk).
 */
static struct mm_struct *efi_prev_mm;
/* * We need our own copy of the higher levels of the page tables * because we want to avoid inserting EFI region mappings (EFI_VA_END * to EFI_VA_START) into the standard kernel page tables. Everything * else can be shared, see efi_sync_low_kernel_mappings(). * * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the * allocation.
*/ int __init efi_alloc_page_tables(void)
{
pgd_t *pgd, *efi_pgd;
p4d_t *p4d;
pud_t *pud;
gfp_t gfp_mask;
/* * We share all the PUD entries apart from those that map the * EFI regions. Copy around them.
*/
BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
/* * It can happen that the physical address of new_memmap lands in memory * which is not mapped in the EFI page table. Therefore we need to go * and ident-map those pages containing the map before calling * phys_efi_set_virtual_address_map().
*/
pfn = pa_memmap >> PAGE_SHIFT;
pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC; if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap); return 1;
}
/* * Certain firmware versions are way too sentimental and still believe * they are exclusive and unquestionable owners of the first physical page, * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY * (but then write-access it later during SetVirtualAddressMap()). * * Create a 1:1 mapping for this page, to avoid triple faults during early * boot with such firmware. We are free to hand this page to the BIOS, * as trim_bios_range() will reserve the first page and isolate it away * from memory allocators anyway.
*/ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
pr_err("Failed to create 1:1 mapping for the first page!\n"); return 1;
}
/* * When SEV-ES is active, the GHCB as set by the kernel will be used * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
*/ if (sev_es_efi_map_ghcbs_cas(pgd)) {
pr_err("Failed to create 1:1 mapping for the GHCBs and CAs!\n"); return 1;
}
/* * When making calls to the firmware everything needs to be 1:1 * mapped and addressable with 32-bit pointers. Map the kernel * text and allocate a new stack because we can't rely on the * stack pointer being < 4GB.
*/ if (!efi_is_mixed()) return 0;
page = alloc_page(GFP_KERNEL|__GFP_DMA32); if (!page) {
pr_err("Unable to allocate EFI runtime stack < 4GB\n"); return 1;
}
efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */
npages = (_etext - _text) >> PAGE_SHIFT;
text = __pa(_text);
if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
pr_err("Failed to unmap kernel text 1:1 mapping\n"); return 1;
}
/* * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF * executable images in memory that consist of both R-X and * RW- sections, so we cannot apply read-only or non-exec * permissions just yet. However, modern EFI systems provide * a memory attributes table that describes those sections * with the appropriate restricted permissions, which are * applied in efi_runtime_update_mappings() below. All other * regions can be mapped non-executable at this point, with * the exception of boot services code regions, but those will * be unmapped again entirely in efi_free_boot_services().
*/ if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_CODE)
flags |= _PAGE_NX;
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
md->type != EFI_MEMORY_MAPPED_IO)
flags |= _PAGE_ENC;
pfn = md->phys_addr >> PAGE_SHIFT; if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, va);
}
/* * Make sure the 1:1 mappings are present as a catch-all for b0rked * firmware which doesn't update all internal pointers after switching * to virtual mode and would otherwise crap on us.
*/
__map_region(md, md->phys_addr);
/* * Enforce the 1:1 mapping as the default virtual address when * booting in EFI mixed mode, because even though we may be * running a 64-bit kernel, the firmware may only be 32-bit.
*/ if (efi_is_mixed()) {
md->virt_addr = md->phys_addr; return;
}
efi_va -= size;
/* Is PA 2M-aligned? */ if (!(pa & (PMD_SIZE - 1))) {
efi_va &= PMD_MASK;
} else {
u64 pa_offset = pa & (PMD_SIZE - 1);
u64 prev_va = efi_va;
/* get us the same offset within this 2M page */
efi_va = (efi_va & PMD_MASK) + pa_offset;
if (efi_va > prev_va)
efi_va -= PMD_SIZE;
}
if (efi_va < EFI_VA_END) {
pr_warn(FW_WARN "VA address range overflow!\n"); return;
}
/* Do the VA map */
__map_region(md, efi_va);
md->virt_addr = efi_va;
}
/*
 * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
 * md->virt_addr is the original virtual address which had been mapped in kexec
 * 1st kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	/* Re-create the 1:1 (physical-address) mapping of the region. */
	__map_region(md, md->phys_addr);
	/* Re-create the virtual mapping inherited from the first kernel. */
	__map_region(md, md->virt_addr);
}
/* Update the 1:1 mapping */
pfn = md->phys_addr >> PAGE_SHIFT;
err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf); if (err1) {
pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, md->virt_addr);
}
err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf); if (err2) {
pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, md->virt_addr);
}
/*
 * Makes the calling thread switch to/from efi_mm context. Can be used
 * in a kernel thread and user context. Preemption needs to remain disabled
 * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
 * can not change under us.
 * It should be ensured that there are no concurrent calls to this function.
 */
static void efi_enter_mm(void)
{
	/* Borrow efi_mm; remember the outgoing mm so it can be restored. */
	efi_prev_mm = use_temporary_mm(&efi_mm);
}
/*
 * DS and ES contain user values. We need to save them.
 * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
 * need to save the old SS: __KERNEL_DS is always acceptable.
 */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
	/* Widen the 32-bit status: move the error flag, bit 31 -> 63 */\
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})
/*
 * Switch to the EFI page tables early so that we can access the 1:1
 * runtime services mappings which are not mapped in any other page
 * tables.
 *
 * Also, disable interrupts because the IDT points to 64-bit handlers,
 * which aren't going to function correctly when we switch to 32-bit.
 */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})
/*
 * Mixed-mode UpdateCapsule() stub: always reports the call as unsupported.
 */
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
return status;
}
/*
 * Mixed-mode QueryCapsuleCapabilities() stub: always reports the call as
 * unsupported.
 */
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
void __init efi_thunk_runtime_setup(void)
{ if (!IS_ENABLED(CONFIG_EFI_MIXED)) return;
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.18Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.