// SPDX-License-Identifier: GPL-2.0
/*
 * misc.c
 *
 * This is a collection of several routines used to extract the kernel
 * which includes KASLR relocation, decompression, ELF parsing, and
 * relocation processing. Additionally included are the screen and serial
 * output functions and related debugging support functions.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 * puts by Nick Holloway 1993, better puts by Martin Mares 1995
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */

/*
 * WARNING!!
 * This code is compiled with -fPIC and it is relocated dynamically at
 * run time, but no relocation processing is performed. This means that
 * it is not safe to place pointers in static structures.
 */
/* Macros used by the included decompressor code below. */ #defineSTATICstatic /* Define an externally visible malloc()/free(). */ #define MALLOC_VISIBLE #include <linux/decompress/mm.h>
/* * Provide definitions of memzero and memmove as some of the decompressors will * try to define their own functions if these are not defined as macros.
*/ #define memzero(s, n) memset((s), 0, (n)) #ifndef memmove #define memmove memmove /* Functions used by the included decompressor code below. */ void *memmove(void *dest, constvoid *src, size_t n); #endif
/* * This is set up by the setup-routine at boot-time
*/ struct boot_params *boot_params_ptr;
struct port_io_ops pio_ops;
memptr free_mem_ptr;
memptr free_mem_end_ptr; int spurious_nmi_count;
staticchar *vidmem; staticint vidport;
/* These might be accessed before .bss is cleared, so use .data instead. */ staticint lines __section(".data"); staticint cols __section(".data");
#ifdef CONFIG_KERNEL_ZSTD #include"../../../../lib/decompress_unzstd.c" #endif /* * NOTE: When adding a new decompressor, please update the analysis in * ../header.S.
*/
/*
 * Scroll the text-mode screen up by one row: copy rows 1..lines-1 up by
 * one row and blank the last row. Each screen cell is two bytes (the
 * character byte is cleared; the second byte — presumably the VGA
 * attribute — is left untouched).
 */
static void scroll(void)
{
	int i;

	/* Shift everything up one row; regions overlap, so memmove is required. */
	memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
	/* Clear the character byte of every cell in the last row. */
	for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
		vidmem[i] = ' ';
}
/* * Calculate the delta between where vmlinux was linked to load * and where it was actually loaded.
*/
delta = min_addr - LOAD_PHYSICAL_ADDR;
/* * The kernel contains a table of relocation addresses. Those * addresses have the final load address of the kernel in virtual * memory. We are currently working in the self map. So we need to * create an adjustment for kernel memory addresses to the self map. * This will involve subtracting out the base address of the kernel.
*/
map = delta - __START_KERNEL_map;
/* * 32-bit always performs relocations. 64-bit relocations are only * needed if KASLR has chosen a different starting address offset * from __START_KERNEL_map.
*/ if (IS_ENABLED(CONFIG_X86_64))
delta = virt_addr - LOAD_PHYSICAL_ADDR;
/* * Process relocations: 32 bit relocations first then 64 bit after. * Two sets of binary relocations are added to the end of the kernel * before compression. Each relocation table entry is the kernel * address of the location which needs to be updated stored as a * 32-bit value which is sign extended to 64 bits. * * Format is: * * kernel bits... * 0 - zero terminator for 64 bit relocations * 64 bit relocation repeated * 0 - zero terminator for 32 bit relocations * 32 bit relocation repeated * * So we work backwards from the end of the decompressed image.
*/ for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) { long extended = *reloc;
extended += map;
ptr = (unsignedlong)extended; if (ptr < min_addr || ptr > max_addr)
error("32-bit relocation outside of kernel!\n");
*(uint32_t *)ptr += delta;
} #ifdef CONFIG_X86_64 for (reloc--; *reloc; reloc--) { long extended = *reloc;
extended += map;
ptr = (unsignedlong)extended; if (ptr < min_addr || ptr > max_addr)
error("64-bit relocation outside of kernel!\n");
/*
 * Set the memory encryption xloadflag based on the mem_encrypt= command line
 * parameter, if provided.
 */
static void parse_mem_encrypt(struct setup_header *hdr)
{
	int on = cmdline_find_option_bool("mem_encrypt=on");
	int off = cmdline_find_option_bool("mem_encrypt=off");

	/*
	 * NOTE(review): this assumes cmdline_find_option_bool() returns the
	 * option's position on the command line (0 if absent), so the option
	 * that appears last wins — confirm against its definition.
	 */
	if (on > off)
		hdr->xloadflags |= XLF_MEM_ENCRYPTION;
}
/*
 * Disable the video console early on SEV-ES/SNP guests by zeroing the
 * screen dimensions, so no video-memory accesses are attempted.
 */
static void early_sev_detect(void)
{
	/*
	 * Accessing video memory causes guest termination because
	 * the boot stage2 #VC handler of SEV-ES/SNP guests does not
	 * support MMIO handling and kexec -c adds screen_info to the
	 * boot parameters passed to the kexec kernel, which causes
	 * console output to be dumped to both video and serial.
	 */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED)
		lines = cols = 0;
}
/*
 * The compressed kernel image (ZO), has been moved so that its position
 * is against the end of the buffer used to hold the uncompressed kernel
 * image (VO) and the execution environment (.bss, .brk), which makes sure
 * there is room to do the in-place decompression. (See header.S for the
 * calculations.)
 *
 *                             |-----compressed kernel image------|
 *                             V                                  V
 * 0                       extract_offset                      +INIT_SIZE
 * |-----------|---------------|-------------------------|--------|
 *             |               |                         |        |
 *           VO__text      startup_32 of ZO          VO__end    ZO__end
 *             ^                                         ^
 *             |-------uncompressed kernel image---------|
 *
 */
asmlinkage __visible void *extract_kernel(void *rmode, unsignedchar *output)
{ unsignedlong virt_addr = LOAD_PHYSICAL_ADDR;
memptr heap = (memptr)boot_heap; unsignedlong needed_size;
size_t entry_offset;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params_ptr = rmode;
/* * Detect TDX guest environment. * * It has to be done before console_init() in order to use * paravirtualized port I/O operations if needed.
*/
early_tdx_detect();
early_sev_detect();
console_init();
/* * Save RSDP address for later use. Have this after console_init() * so that early debugging output from the RSDP parsing code can be * collected.
*/
boot_params_ptr->acpi_rsdp_addr = get_rsdp_addr();
debug_putstr("early console in extract_kernel\n");
/* * The memory hole needed for the kernel is the larger of either * the entire decompressed kernel plus relocation table, or the * entire decompressed kernel plus .bss and .brk sections. * * On X86_64, the memory is mapped with PMD pages. Round the * size up so that the full extent of PMD pages mapped is * included in the check against the valid memory table * entries. This ensures the full mapped area is usable RAM * and doesn't include any reserved areas.
*/
needed_size = max_t(unsignedlong, output_len, kernel_total_size); #ifdef CONFIG_X86_64
needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN); #endif
/*
 * NOTE(review): a German website disclaimer ("Die Informationen auf dieser
 * Webseite wurden nach bestem Wissen ...") appeared here; it is not part of
 * this source file and looks like text-extraction contamination. The
 * remainder of extract_kernel() is missing and must be restored from the
 * original file.
 */