// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */
/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	/* Running cursor into @buf; each sprintf() advances it. */
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	/* Emit one "NAME=0xADDR" line per configuration table present. */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
	/*
	 * NOTE(review): function is truncated in this chunk — the final
	 * "return str - buf;" and closing brace are not visible here.
	 */
/*
 * NOTE(review): orphaned fragment — this guard belongs inside a loop of a
 * debugfs registration routine whose surrounding code (including the
 * declaration of 'i' and the enclosing loop required by the "break") is
 * not visible in this chunk.
 */
	/* Cap the number of boot-service blobs exposed via debugfs. */
	if (i >= EFI_DEBUGFS_MAX_BLOBS) {
		/*
		 * NOTE(review): "More then" should read "More than"; the
		 * runtime log string is left untouched here.
		 */
		pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
			EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
		break;
	}
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	/* No runtime services: mask out every runtime-service capability. */
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	/* Nothing to register if the system was not booted via EFI. */
	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			/* Degrade gracefully: disable runtime services. */
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		/* SSDT load failure is non-fatal: log and continue. */
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();
	/*
	 * NOTE(review): function is truncated in this chunk — the success
	 * return and the err_remove_group/err_unregister/err_put/
	 * err_destroy_wq unwinding labels referenced by the gotos above are
	 * not visible here. The fused "staticint" in the original paste was
	 * restored to "static int" (extraction garbling).
	 */
/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	/* Cannot search without a memory map. */
	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): function is truncated in this chunk — the walk over
	 * the EFI memory map that fills *out_md (and the closing brace) is
	 * not visible here; 'md' is declared but unused in the visible part.
	 */
/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	bool already_reserved;

	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	already_reserved = memblock_is_region_reserved(addr, size);
	if (!already_reserved)
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the memblock_reserve() above is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @addr, @size range is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
/*
 * NOTE(review): spliced fragment — the kernel-doc above documents
 * reserve_unaccepted(), but the code below is UEFI RNG-seed handling that
 * belongs to a different function. The enclosing function header and the
 * declarations of 'seed', 'size' and 'efi_rng_seed' are not visible in
 * this chunk.
 */
	/* First pass: map only the header to learn the seed payload size. */
	seed = early_memremap(efi_rng_seed, sizeof(*seed));
	if (seed != NULL) {
		size = min_t(u32, seed->size, SZ_1K); /* sanity check */
		early_memunmap(seed, sizeof(*seed));
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		/*
		 * Second pass: map header plus payload, credit it to the
		 * RNG, then scrub the seed bytes so they cannot be reused.
		 */
		seed = early_memremap(efi_rng_seed, sizeof(*seed) + size);
		if (seed != NULL) {
			add_bootloader_randomness(seed->bits, size);
			memzero_explicit(seed->bits, size);
			early_memunmap(seed, sizeof(*seed) + size);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
}
/*
 * NOTE(review): fragment of a configuration-table parsing routine —
 * 'mem_reserve' and 'i' are not declared in the visible code, and the
 * enclosing function header is missing from this chunk. The fused
 * "unsignedlong" in the original paste was restored to "unsigned long"
 * (extraction garbling).
 */
	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* Walk the linked list of memreserve entries. */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			/* Entry may start at any offset within the page. */
			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			/* ... and every region the entry records. */
			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}
			/*
			 * NOTE(review): truncated here — the loop advance
			 * (next pointer read / early_memunmap) and the rest
			 * of the function are not visible in this chunk.
			 */
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;
	/*
	 * NOTE(review): function is truncated in this chunk — the memory-map
	 * walk and return statements are not visible here. The fused
	 * "unsignedlong" in the original paste was restored to
	 * "unsigned long" (extraction garbling).
	 */
/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;
	/*
	 * NOTE(review): spliced fragment — the statements below belong to a
	 * different function (presumably efi_mem_reserve_iomem(), per the
	 * comment they carry): 'parent', 'res', 'ret', 'addr' and 'size'
	 * are not declared in efi_mem_type(). The efi_mem_type() body is
	 * missing from this chunk. The fused "unsignedlong" was restored to
	 * "unsigned long" (extraction garbling).
	 */
	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	/* ULONG_MAX sentinel: the memreserve table is known to be absent. */
	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	/* Lazily map the root of the memreserve list on first use. */
	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		/* Atomically claim a slot unless the entry is full. */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;
	/*
	 * NOTE(review): function is truncated mid-body in this chunk — the
	 * memunmap()/return of the slot-found path, the loop advance, and
	 * the allocation of a fresh entry preceding the initialization
	 * below are not visible here. The fused "unsignedlong" was restored
	 * to "unsigned long" (extraction garbling).
	 */
	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;
/*
 * NOTE(review): extraneous trailing text — a German website disclaimer
 * that leaked into this file during extraction; it is not part of the
 * source and, as bare prose, would break compilation. Translated:
 * "The information on this web page was carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 * Recommend removing this block entirely.
 */