/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 */
/*
 * The .text..relocate_kernel and .data..relocate_kernel sections are copied
 * into the control page, and the remainder of the page is used as the stack.
 */
/*
 * Tail of identity_mapped (the SYM_CODE_START and earlier body are above
 * this chunk).  Register roles, as inferred from the visible uses below:
 *   %rdx = entry point of the new kernel image
 *   %r8  = non-zero when SME is active (drives the cache flush)
 *   %r9  = value loaded into %cr3 (identity page table, presumably)
 *   %r11 = preserve_context flag passed on to swap_pages
 *   %r13 = original CR4 value, read in relocate_kernel()
 * NOTE(review): confirm these against the function prologue outside this
 * chunk before relying on them.
 */
/* store the start address on the stack */
pushq %rdx
/* Create a GDTR (16 bits limit, 64 bits addr) on stack */
leaq kexec_debug_gdt(%rip), %rax
pushq %rax
pushw (%rax) /* first word of kexec_debug_gdt is used as the limit */
/* Load the GDT, put the stack back */
lgdt (%rsp)
addq $10, %rsp /* discard the 2-byte limit + 8-byte base just pushed */
/* Test that we can load segments */
movq %ds, %rax
movq %rax, %ds
/* Now an IDTR on the stack to load the IDT the kernel created */
leaq kexec_debug_idt(%rip), %rsi
pushq %rsi
pushw $0xff /* IDTR limit */
lidt (%rsp)
addq $10, %rsp /* discard the 10-byte IDTR again */
//int3 /* debug aid: uncomment to trap through the IDT just loaded */
/*
 * Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP
 * below.
 */
movq %cr4, %rax
andq $~(X86_CR4_CET), %rax
movq %rax, %cr4
/*
 * Set cr0 to a known state:
 *  - Paging enabled
 *  - Alignment check disabled
 *  - Write protect disabled
 *  - No task switch
 *  - Don't do FP software emulation.
 *  - Protected mode enabled
 */
movq %cr0, %rax
andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
orl $(X86_CR0_PG | X86_CR0_PE), %eax
movq %rax, %cr0
/*
 * Set cr4 to a known state:
 *  - physical address extension enabled
 *  - 5-level paging, if it was enabled before
 *  - Machine check exception on TDX guest, if it was enabled before.
 *    Clearing MCE might not be allowed in TDX guests, depending on setup.
 *
 * Use R13 that contains the original CR4 value, read in relocate_kernel().
 * PAE is always set in the original CR4.
 */
andl $(X86_CR4_PAE | X86_CR4_LA57), %r13d
ALTERNATIVE "", __stringify(orl $X86_CR4_MCE, %r13d), X86_FEATURE_TDX_GUEST
movq %r13, %cr4
/* Flush the TLB (needed?) */
movq %r9, %cr3
/*
 * If SME is active, there could be old encrypted cache line
 * entries that will conflict with the now unencrypted memory
 * used by kexec. Flush the caches before copying the kernel.
 */
testq %r8, %r8
jz .Lsme_off
wbinvd
.Lsme_off:
call swap_pages
/*
 * To be certain of avoiding problems with self-modifying code
 * I need to execute a serializing instruction here.
 * So I flush the TLB by reloading %cr3 here, it's handy,
 * and not processor dependent.
 */
movq %cr3, %rax
movq %rax, %cr3
/* Use the swap page for the callee's stack */
movq kexec_pa_swap_page(%rip), %r10
leaq PAGE_SIZE(%r10), %rsp
/* push the existing entry point onto the callee's stack */
pushq %rdx
ANNOTATE_RETPOLINE_SAFE
call *%rdx
/* get the re-entry point of the peer system */
popq %rbp
movq kexec_pa_swap_page(%rip), %r10
movq pa_backup_pages_map(%rip), %rdi
movq kexec_pa_table_page(%rip), %rax
movq %rax, %cr3
/* Find start (and end) of this physical mapping of control page */
leaq (%rip), %r8
ANNOTATE_NOENDBR
andq $PAGE_MASK, %r8 /* round %rip down to the control page start */
lea PAGE_SIZE(%r8), %rsp /* ... and use the end of that page as stack */
movl $1, %r11d /* Ensure preserve_context flag is set */
call swap_pages
/*
 * Compute the virtual address of virtual_mapped: take the VA of the
 * control page and add virtual_mapped's offset within the relocated
 * section (both offsets measured from the same RIP anchor "0:").
 */
movq kexec_va_control_page(%rip), %rax
0: addq $virtual_mapped - 0b, %rax
subq $__relocate_kernel_start - 0b, %rax
pushq %rax
ANNOTATE_UNRET_SAFE
ret /* "return" to the virtual_mapped address pushed above */
int3 /* stop straight-line speculation past the ret */
SYM_CODE_END(identity_mapped)
/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
UNWIND_HINT_END_OF_STACK
/*
 * Walk the kexec indirection list and copy/swap each source page into its
 * destination page.  Each 8-byte entry is a page address with flag bits in
 * the low bits.  Only the head of the routine is visible in this chunk;
 * .Ldone and .Lnoswap lie beyond it.
 *
 * %rdi indirection page
 * %r11 preserve_context
 */
movq %rdi, %rcx /* Put the indirection_page in %rcx */
xorl %edi, %edi /* no destination page selected yet */
xorl %esi, %esi /* no source page selected yet */
jmp .Lstart /* Should start with an indirection record */
.Lloop: /* top, read another word for the indirection page */
movq (%rbx), %rcx
addq $8, %rbx
.Lstart:
testb $0x1, %cl /* is it a destination page? */
jz .Lnotdest
movq %rcx, %rdi
andq $0xfffffffffffff000, %rdi /* strip flag bits -> page address */
jmp .Lloop
.Lnotdest:
testb $0x2, %cl /* is it an indirection page? */
jz .Lnotind
movq %rcx, %rbx /* follow the chain: %rbx = next entry to read */
andq $0xfffffffffffff000, %rbx
jmp .Lloop
.Lnotind:
testb $0x4, %cl /* is it the done indicator? */
jz .Lnotdone
jmp .Ldone /* target lies outside this chunk */
.Lnotdone:
testb $0x8, %cl /* is it the source indicator? */
jz .Lloop /* Ignore it otherwise */
movq %rcx, %rsi /* For every source page do a copy */
andq $0xfffffffffffff000, %rsi
movq %rdi, %rdx /* Save destination page to %rdx */
movq %rsi, %rax /* Save source page to %rax */
testq %r11, %r11 /* Only actually swap for ::preserve_context */
jz .Lnoswap /* target lies outside this chunk */
/* Tail of pr_char_8250_mmio32; the head of the routine is outside this chunk. */
.Lready_mmio:
movb %al, (%rdx) /* emit the byte in %al to the address in %rdx — presumably the UART data register; confirm against the routine head */
ANNOTATE_UNRET_SAFE
ret
int3 /* NOTE(review): upstream convention places int3 after ret; absent here */
SYM_CODE_END(pr_char_8250_mmio32)
/*
 * Load pr_char function pointer into %rsi and load %rdx with whatever
 * that function wants to see there (typically port/MMIO address).
 */
.macro pr_setup
/* Default to the 8250 port-I/O output routine ... */
leaq pr_char_8250(%rip), %rsi
/* ... but only if a legacy port number was recorded. */
movw kexec_debug_8250_port(%rip), %dx
testw %dx, %dx
jnz 1f /* port configured: keep pr_char_8250 (label 1 and .endm lie outside this chunk) */
/*
 * NOTE(review): the five lines that followed here were not assembly at all
 * but German web-page boilerplate, apparently pasted in during extraction.
 * Translated for the record: "The information on this website was compiled
 * carefully to the best of our knowledge. However, no guarantee is given of
 * the completeness, correctness, or quality of the information provided.
 * Note: the coloured syntax display and the measurement are still
 * experimental."  This text is not part of the kernel source and should be
 * removed; the remainder of pr_setup and the file continues elsewhere.
 */